author    Daniel Vetter <daniel.vetter@ffwll.ch>    2015-09-02 08:33:42 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>    2015-09-02 08:33:42 -0400
commit    e93c28f39375558409329a02a767d5cadfcc4a31 (patch)
tree      9f1b4b5ce765b887b6002cded59fc934e6c9c012
parent    85a62bf9d8ef8d533635270ae985281c58e8c974 (diff)
parent    6fa2d197936ba0b8936e813d0adecefac160062b (diff)
Merge tag 'drm-intel-next-fixes-2015-09-02' into drm-intel-next-queued
Backmerge -fixes since there's more DDI-E related cleanups on top of the
pile of -fixes for skl that just landed for 4.3.

Conflicts:
	drivers/gpu/drm/i915/intel_display.c
	drivers/gpu/drm/i915/intel_dp.c
	drivers/gpu/drm/i915/intel_lrc.c

Conflicts are all fairly harmless adjacent line stuff.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
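(For context: a backmerge like this is normally produced with a plain git
merge of the -fixes tag into the development branch. The sketch below is
illustrative only; the branch layout is an assumption, not recorded in the
commit itself:

    $ git checkout drm-intel-next-queued
    $ git merge drm-intel-next-fixes-2015-09-02
    # resolve the three conflicting i915 files listed above, then:
    $ git add drivers/gpu/drm/i915/intel_display.c \
              drivers/gpu/drm/i915/intel_dp.c \
              drivers/gpu/drm/i915/intel_lrc.c
    $ git commit
)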
-rw-r--r--.get_maintainer.ignore1
-rw-r--r--.mailmap1
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt1
-rw-r--r--Documentation/devicetree/bindings/drm/msm/dsi.txt41
-rw-r--r--Documentation/devicetree/bindings/drm/msm/hdmi.txt3
-rw-r--r--Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt8
-rw-r--r--Documentation/devicetree/bindings/gpu/st,stih4xx.txt72
-rw-r--r--Documentation/devicetree/bindings/panel/auo,b080uan01.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/lg,lg4573.txt19
-rw-r--r--Documentation/devicetree/bindings/panel/nec,nl4827hc19-05b.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/okaya,rs800480t-7x0gp.txt7
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt2
-rw-r--r--Documentation/devicetree/bindings/video/fsl,dcu.txt22
-rw-r--r--MAINTAINERS18
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Makefile3
-rw-r--r--arch/arm/boot/dts/dra7.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi8
-rw-r--r--arch/arm/boot/dts/k2e.dtsi15
-rw-r--r--arch/arm/boot/dts/k2hk.dtsi11
-rw-r--r--arch/arm/boot/dts/k2l.dtsi16
-rw-r--r--arch/arm/boot/dts/keystone.dtsi11
-rw-r--r--arch/arm/boot/dts/omap2430.dtsi3
-rw-r--r--arch/arm/boot/dts/omap4.dtsi3
-rw-r--r--arch/arm/boot/dts/omap5.dtsi3
-rw-r--r--arch/arm/boot/dts/ste-dbx5x0.dtsi53
-rw-r--r--arch/arm/configs/exynos_defconfig6
-rw-r--r--arch/arm/configs/multi_v7_defconfig6
-rw-r--r--arch/arm/kernel/entry-common.S1
-rw-r--r--arch/arm/kernel/head.S3
-rw-r--r--arch/arm/kernel/vdso.c7
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c2
-rw-r--r--arch/arm/mach-exynos/pm_domains.c3
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c1
-rw-r--r--arch/arm/vdso/Makefile2
-rw-r--r--arch/arm64/kernel/vdso.c7
-rw-r--r--arch/mips/kernel/genex.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/x86/entry/entry_64_compat.S3
-rw-r--r--arch/x86/include/asm/sigcontext.h6
-rw-r--r--arch/x86/include/asm/switch_to.h12
-rw-r--r--arch/x86/include/uapi/asm/sigcontext.h21
-rw-r--r--arch/x86/kernel/apic/vector.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c23
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_cqm.c8
-rw-r--r--arch/x86/kernel/fpu/core.c2
-rw-r--r--arch/x86/kernel/fpu/init.c7
-rw-r--r--arch/x86/kernel/process.c2
-rw-r--r--arch/x86/kernel/signal.c26
-rw-r--r--arch/x86/kernel/step.c4
-rw-r--r--arch/x86/kvm/x86.c7
-rw-r--r--arch/x86/math-emu/fpu_entry.c3
-rw-r--r--arch/x86/math-emu/fpu_system.h21
-rw-r--r--arch/x86/math-emu/get_address.c3
-rw-r--r--arch/x86/xen/Kconfig4
-rw-r--r--arch/x86/xen/Makefile4
-rw-r--r--arch/x86/xen/xen-ops.h6
-rw-r--r--block/blk-settings.c4
-rw-r--r--crypto/authencesn.c44
-rw-r--r--drivers/acpi/video_detect.c16
-rw-r--r--drivers/ata/ahci_brcmstb.c6
-rw-r--r--drivers/ata/libata-core.c24
-rw-r--r--drivers/ata/libata-eh.c105
-rw-r--r--drivers/ata/libata-scsi.c21
-rw-r--r--drivers/ata/libata.h6
-rw-r--r--drivers/ata/sata_sx4.c16
-rw-r--r--drivers/base/regmap/regcache-rbtree.c19
-rw-r--r--drivers/block/xen-blkback/blkback.c4
-rw-r--r--drivers/block/xen-blkfront.c128
-rw-r--r--drivers/block/zram/zram_drv.c6
-rw-r--r--drivers/clk/pxa/clk-pxa3xx.c2
-rw-r--r--drivers/clocksource/sh_cmt.c6
-rw-r--r--drivers/clocksource/timer-imx-gpt.c1
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c6
-rw-r--r--drivers/crypto/caam/caamhash.c7
-rw-r--r--drivers/crypto/nx/nx-sha256.c27
-rw-r--r--drivers/crypto/nx/nx-sha512.c28
-rw-r--r--drivers/dma/dmaengine.c4
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/firmware/broadcom/bcm47xx_nvram.c2
-rw-r--r--drivers/gpu/drm/Kconfig28
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h153
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c838
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c333
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c223
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c81
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c146
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c563
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c145
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c179
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c274
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_dpm.c181
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h182
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_smc.c853
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_family.h)52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c145
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c120
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi_dpm.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c2
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h39
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h1246
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h1282
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h6080
-rw-r--r--drivers/gpu/drm/amd/include/atom-bits.h (renamed from drivers/gpu/drm/amd/amdgpu/atom-bits.h)0
-rw-r--r--drivers/gpu/drm/amd/include/atom-names.h (renamed from drivers/gpu/drm/amd/amdgpu/atom-names.h)0
-rw-r--r--drivers/gpu/drm/amd/include/atom-types.h (renamed from drivers/gpu/drm/amd/amdgpu/atom-types.h)0
-rw-r--r--drivers/gpu/drm/amd/include/atombios.h (renamed from drivers/gpu/drm/amd/amdgpu/atombios.h)0
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h624
-rw-r--r--drivers/gpu/drm/amd/include/cgs_linux.h135
-rw-r--r--drivers/gpu/drm/amd/include/pptable.h (renamed from drivers/gpu/drm/amd/amdgpu/pptable.h)6
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c462
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h162
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c78
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c33
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c48
-rw-r--r--drivers/gpu/drm/ast/ast_main.c16
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c218
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c4
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c4
-rw-r--r--drivers/gpu/drm/bochs/bochs_fbdev.c36
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c16
-rw-r--r--drivers/gpu/drm/bridge/Kconfig24
-rw-r--r--drivers/gpu/drm/bridge/Makefile4
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c (renamed from drivers/gpu/drm/bridge/ptn3460.c)0
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c (renamed from drivers/gpu/drm/bridge/ps8622.c)0
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c4
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c41
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c15
-rw-r--r--drivers/gpu/drm/drm_atomic.c15
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c6
-rw-r--r--drivers/gpu/drm/drm_crtc.c6
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c29
-rw-r--r--drivers/gpu/drm/drm_edid.c2
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c45
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c336
-rw-r--r--drivers/gpu/drm/drm_gem.c2
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c7
-rw-r--r--drivers/gpu/drm/drm_ioctl.c3
-rw-r--r--drivers/gpu/drm/drm_irq.c330
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c52
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/Makefile7
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c65
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c94
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c123
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c186
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.h33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c36
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c19
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c286
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.h20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c111
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c28
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h110
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c138
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c174
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c77
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c106
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c348
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h58
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c16
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c41
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c107
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c1021
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c198
-rw-r--r--drivers/gpu/drm/fsl-dcu/Kconfig18
-rw-r--r--drivers/gpu/drm/fsl-dcu/Makefile7
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c210
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h19
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c404
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h197
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c23
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c43
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h33
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c261
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h17
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c182
-rw-r--r--drivers/gpu/drm/gma500/accel_2d.c6
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c48
-rw-r--r--drivers/gpu/drm/i915/Kconfig15
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c15
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c48
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h12
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c119
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h13
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c75
-rw-r--r--drivers/gpu/drm/i915/intel_display.c60
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c75
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c9
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c61
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c21
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c3
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_cursor.c22
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c39
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_i2c.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c25
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c212
-rw-r--r--drivers/gpu/drm/msm/Kconfig15
-rw-r--r--drivers/gpu/drm/msm/Makefile15
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h18
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h33
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h206
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h18
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h18
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c58
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h43
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h211
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c92
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h44
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c270
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c216
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h26
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c (renamed from drivers/gpu/drm/msm/dsi/dsi_phy.c)413
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h89
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c150
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c166
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.c42
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.h9
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c31
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h26
-rw-r--r--drivers/gpu/drm/msm/edp/edp.xml.h22
-rw-r--r--drivers/gpu/drm/msm/edp/edp_ctrl.c17
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c79
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h32
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h28
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c1
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c16
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c101
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c1437
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c52
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c32
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c57
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h26
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h22
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c19
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c38
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h24
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c9
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h180
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c180
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h13
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c133
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c243
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h43
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c18
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c19
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c80
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h57
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c334
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c26
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_common.xml.h28
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_format.c46
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.c3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.h20
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c82
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h19
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c34
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c39
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c29
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c38
-rw-r--r--drivers/gpu/drm/panel/Kconfig16
-rw-r--r--drivers/gpu/drm/panel/Makefile5
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c298
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c (renamed from drivers/gpu/drm/panel/panel-ld9040.c)2
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c (renamed from drivers/gpu/drm/panel/panel-s6e8aa0.c)2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c99
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c40
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c4
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c42
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c47
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c12
-rw-r--r--drivers/gpu/drm/sti/Makefile7
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c141
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.h12
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c (renamed from drivers/gpu/drm/sti/sti_drm_crtc.c)213
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.h22
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c243
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.h5
-rw-r--r--drivers/gpu/drm/sti/sti_drm_crtc.h22
-rw-r--r--drivers/gpu/drm/sti/sti_drm_plane.c251
-rw-r--r--drivers/gpu/drm/sti/sti_drm_plane.h18
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c (renamed from drivers/gpu/drm/sti/sti_drm_drv.c)147
-rw-r--r--drivers/gpu/drm/sti/sti_drv.h (renamed from drivers/gpu/drm/sti/sti_drm_drv.h)6
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c536
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.h7
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c27
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c482
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.h12
-rw-r--r--drivers/gpu/drm/sti/sti_layer.c213
-rw-r--r--drivers/gpu/drm/sti/sti_layer.h131
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c72
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h27
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c122
-rw-r--r--drivers/gpu/drm/sti/sti_plane.h71
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c54
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c72
-rw-r--r--drivers/gpu/drm/sti/sti_vid.h19
-rw-r--r--drivers/gpu/drm/tegra/dc.c294
-rw-r--r--drivers/gpu/drm/tegra/dc.h24
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c63
-rw-r--r--drivers/gpu/drm/tegra/dpaux.h2
-rw-r--r--drivers/gpu/drm/tegra/drm.c16
-rw-r--r--drivers/gpu/drm/tegra/drm.h10
-rw-r--r--drivers/gpu/drm/tegra/dsi.c127
-rw-r--r--drivers/gpu/drm/tegra/dsi.h4
-rw-r--r--drivers/gpu/drm/tegra/fb.c35
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c79
-rw-r--r--drivers/gpu/drm/tegra/output.c20
-rw-r--r--drivers/gpu/drm/tegra/rgb.c50
-rw-r--r--drivers/gpu/drm/tegra/sor.c1607
-rw-r--r--drivers/gpu/drm/tegra/sor.h298
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c4
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c41
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c32
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/includeCheck.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h110
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h2071
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h457
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h1487
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h99
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h50
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h1204
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h1633
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_escape.h (renamed from drivers/gpu/drm/vmwgfx/svga_escape.h)2
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h (renamed from drivers/gpu/drm/vmwgfx/svga_overlay.h)10
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_reg.h (renamed from drivers/gpu/drm/vmwgfx/svga_reg.h)664
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_types.h46
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h21
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h25
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h25
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_reg.h2627
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h912
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_types.h45
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c1294
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.h209
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c1303
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c26
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c786
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c662
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c184
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c500
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h335
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c1939
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c560
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c145
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c47
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c1651
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h192
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c49
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c212
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_reg.h12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c277
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c556
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c500
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c555
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.h160
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c1266
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c309
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c2
-rw-r--r--drivers/gpu/host1x/mipi.c253
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c95
-rw-r--r--drivers/gpu/vga/vgaarb.c142
-rw-r--r--drivers/hid/hid-input.c7
-rw-r--r--drivers/hid/hid-uclogic.c2
-rw-r--r--drivers/hid/wacom_sys.c70
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c2
-rw-r--r--drivers/irqchip/irq-crossbar.c4
-rw-r--r--drivers/md/dm-cache-policy-mq.c2
-rw-r--r--drivers/md/dm-cache-policy-smq.c2
-rw-r--r--drivers/md/dm-thin-metadata.c4
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h6
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c16
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c37
-rw-r--r--drivers/md/persistent-data/dm-btree.c7
-rw-r--r--drivers/media/dvb-frontends/Kconfig2
-rw-r--r--drivers/media/pci/cobalt/Kconfig1
-rw-r--r--drivers/media/pci/cobalt/cobalt-irq.c2
-rw-r--r--drivers/media/pci/mantis/mantis_dma.c5
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c116
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c122
-rw-r--r--drivers/media/rc/nuvoton-cir.c127
-rw-r--r--drivers/media/rc/nuvoton-cir.h1
-rw-r--r--drivers/media/rc/rc-core-priv.h36
-rw-r--r--drivers/media/rc/rc-ir-raw.c139
-rw-r--r--drivers/media/rc/rc-loopback.c36
-rw-r--r--drivers/media/rc/rc-main.c7
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c40
-rw-r--r--drivers/memory/omap-gpmc.c6
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/arizona-core.c16
-rw-r--r--drivers/net/bonding/bond_main.c1
-rw-r--r--drivers/net/ethernet/3com/3c59x.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c16
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/cavium/Kconfig3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h5
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c191
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c5
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c345
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169.c4
-rw-r--r--drivers/net/ethernet/rocker/rocker.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c4
-rw-r--r--drivers/net/ethernet/ti/netcp.h1
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c35
-rw-r--r--drivers/net/hamradio/mkiss.c7
-rw-r--r--drivers/net/ntb_netdev.c9
-rw-r--r--drivers/net/phy/phy.c16
-rw-r--r--drivers/net/phy/smsc.c31
-rw-r--r--drivers/net/ppp/ppp_generic.c78
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/virtio_net.c4
-rw-r--r--drivers/net/wan/cosa.c3
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c22
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c15
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c4
-rw-r--r--drivers/net/wireless/rtlwifi/core.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c1
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/net/xen-netback/netback.c62
-rw-r--r--drivers/ntb/ntb.c2
-rw-r--r--drivers/ntb/ntb_transport.c201
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/probe.c7
-rw-r--r--drivers/platform/chrome/Kconfig1
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c4
-rw-r--r--drivers/scsi/libfc/fc_exch.c8
-rw-r--r--drivers/scsi/libfc/fc_fcp.c19
-rw-r--r--drivers/scsi/libiscsi.c25
-rw-r--r--drivers/scsi/scsi_error.c31
-rw-r--r--drivers/scsi/scsi_pm.c22
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/target/iscsi/iscsi_target.c4
-rw-r--r--drivers/target/target_core_configfs.c9
-rw-r--r--drivers/target/target_core_hba.c10
-rw-r--r--drivers/target/target_core_spc.c44
-rw-r--r--drivers/thermal/cpu_cooling.c73
-rw-r--r--drivers/thermal/power_allocator.c8
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/console/fbcon.c3
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/omap2/dss/dss-of.c4
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c4
-rw-r--r--drivers/video/of_videomode.c4
-rw-r--r--drivers/xen/events/events_base.c10
-rw-r--r--drivers/xen/events/events_fifo.c45
-rw-r--r--drivers/xen/events/events_internal.h7
-rw-r--r--drivers/xen/xenbus/xenbus_client.c4
-rw-r--r--fs/fuse/dev.c10
-rw-r--r--fs/namei.c2
-rw-r--r--include/drm/drmP.h33
-rw-r--r--include/drm/drm_crtc.h8
-rw-r--r--include/drm/drm_dp_helper.h1
-rw-r--r--include/drm/drm_edid.h19
-rw-r--r--include/drm/drm_fb_helper.h212
-rw-r--r--include/drm/drm_modeset_lock.h1
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/drm/drm_plane_helper.h45
-rw-r--r--include/linux/ata.h18
-rw-r--r--include/linux/irq.h1
-rw-r--r--include/linux/mm.h28
-rw-r--r--include/linux/mm_types.h9
-rw-r--r--include/linux/skbuff.h20
-rw-r--r--include/media/rc-core.h7
-rw-r--r--include/media/videobuf2-core.h2
-rw-r--r--include/scsi/scsi_eh.h1
-rw-r--r--include/sound/soc-topology.h12
-rw-r--r--include/uapi/drm/vmwgfx_drm.h38
-rw-r--r--include/uapi/sound/asoc.h6
-rw-r--r--ipc/sem.c47
-rw-r--r--kernel/cpuset.c2
-rw-r--r--kernel/events/core.c91
-rw-r--r--kernel/events/ring_buffer.c10
-rw-r--r--kernel/irq/chip.c19
-rw-r--r--kernel/locking/qspinlock_paravirt.h11
-rw-r--r--kernel/time/timer.c4
-rw-r--r--mm/cma.h2
-rw-r--r--mm/kasan/kasan.c2
-rw-r--r--mm/kasan/report.c2
-rw-r--r--mm/memory-failure.c22
-rw-r--r--mm/memory_hotplug.c3
-rw-r--r--mm/page_alloc.c17
-rw-r--r--mm/slab.c4
-rw-r--r--mm/slub.c2
-rw-r--r--net/9p/client.c2
-rw-r--r--net/batman-adv/distributed-arp-table.c18
-rw-r--r--net/batman-adv/gateway_client.c2
-rw-r--r--net/batman-adv/soft-interface.c3
-rw-r--r--net/batman-adv/translation-table.c32
-rw-r--r--net/bluetooth/mgmt.c2
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/br_netlink.c4
-rw-r--r--net/core/datagram.c13
-rw-r--r--net/core/pktgen.c3
-rw-r--r--net/core/request_sock.c8
-rw-r--r--net/core/skbuff.c39
-rw-r--r--net/dsa/slave.c3
-rw-r--r--net/ipv4/fib_trie.c2
-rw-r--r--net/ipv4/igmp.c33
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c3
-rw-r--r--net/ipv4/sysctl_net_ipv4.c10
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv4/udp.c13
-rw-r--r--net/ipv6/ip6_fib.c2
-rw-r--r--net/ipv6/mcast_snoop.c33
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c19
-rw-r--r--net/ipv6/route.c85
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/mac80211/rc80211_minstrel.c11
-rw-r--r--net/netfilter/nf_conntrack_core.c8
-rw-r--r--net/netfilter/nf_synproxy_core.c4
-rw-r--r--net/netfilter/xt_CT.c5
-rw-r--r--net/netlink/af_netlink.c5
-rw-r--r--net/openvswitch/actions.c16
-rw-r--r--net/rds/info.c2
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/sch_fq_codel.c22
-rwxr-xr-xscripts/kconfig/streamline_config.pl2
-rw-r--r--sound/pci/hda/patch_realtek.c2
-rw-r--r--sound/soc/Kconfig3
-rw-r--r--sound/soc/Makefile3
-rw-r--r--sound/usb/card.c2
-rw-r--r--tools/perf/builtin-record.c11
-rw-r--r--tools/perf/builtin-top.c4
-rw-r--r--tools/perf/config/Makefile2
-rw-r--r--tools/perf/util/machine.c20
-rw-r--r--tools/perf/util/stat-shadow.c8
-rw-r--r--tools/perf/util/thread.c6
627 files changed, 48154 insertions, 17090 deletions
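(Aside: this stat is cgit's rendering of the merge against its first
parent. Assuming a local kernel checkout, an equivalent summary can be
regenerated with:

    $ git diff --stat 85a62bf9d8ef..e93c28f39375

where the two SHAs are the first parent and the merge commit named in the
header above.)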
diff --git a/.get_maintainer.ignore b/.get_maintainer.ignore
new file mode 100644
index 000000000000..cca6d870f7a5
--- /dev/null
+++ b/.get_maintainer.ignore
@@ -0,0 +1 @@
+Christoph Hellwig <hch@lst.de>
diff --git a/.mailmap b/.mailmap
index b4091b7a78fe..4b31af54ccd5 100644
--- a/.mailmap
+++ b/.mailmap
@@ -17,6 +17,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Al Viro <viro@ftp.linux.org.uk>
 Al Viro <viro@zenIV.linux.org.uk>
 Andreas Herrmann <aherrman@de.ibm.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index d6b794cef0b8..91e6e5c478d0 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -199,6 +199,7 @@ nodes to be present and contain the properties described below.
199 "qcom,kpss-acc-v1" 199 "qcom,kpss-acc-v1"
200 "qcom,kpss-acc-v2" 200 "qcom,kpss-acc-v2"
201 "rockchip,rk3066-smp" 201 "rockchip,rk3066-smp"
202 "ste,dbx500-smp"
202 203
203 - cpu-release-addr 204 - cpu-release-addr
204 Usage: required for systems that have an "enable-method" 205 Usage: required for systems that have an "enable-method"
diff --git a/Documentation/devicetree/bindings/drm/msm/dsi.txt b/Documentation/devicetree/bindings/drm/msm/dsi.txt
index cd8fe6cf536c..d56923cd5590 100644
--- a/Documentation/devicetree/bindings/drm/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/drm/msm/dsi.txt
@@ -30,20 +30,27 @@ Optional properties:
 - panel@0: Node of panel connected to this DSI controller.
   See files in Documentation/devicetree/bindings/panel/ for each supported
   panel.
-- qcom,dual-panel-mode: Boolean value indicating if the DSI controller is
+- qcom,dual-dsi-mode: Boolean value indicating if the DSI controller is
   driving a panel which needs 2 DSI links.
-- qcom,master-panel: Boolean value indicating if the DSI controller is driving
+- qcom,master-dsi: Boolean value indicating if the DSI controller is driving
   the master link of the 2-DSI panel.
-- qcom,sync-dual-panel: Boolean value indicating if the DSI controller is
+- qcom,sync-dual-dsi: Boolean value indicating if the DSI controller is
   driving a 2-DSI panel whose 2 links need receive command simultaneously.
 - interrupt-parent: phandle to the MDP block if the interrupt signal is routed
   through MDP block
+- pinctrl-names: the pin control state names; should contain "default"
+- pinctrl-0: the default pinctrl state (active)
+- pinctrl-n: the "sleep" pinctrl state
+- port: DSI controller output port. This contains one endpoint subnode, with its
+  remote-endpoint set to the phandle of the connected panel's endpoint.
+  See Documentation/devicetree/bindings/graph.txt for device graph info.
 
 DSI PHY:
 Required properties:
 - compatible: Could be the following
   * "qcom,dsi-phy-28nm-hpm"
   * "qcom,dsi-phy-28nm-lp"
+  * "qcom,dsi-phy-20nm"
 - reg: Physical base address and length of the registers of PLL, PHY and PHY
   regulator
 - reg-names: The names of register regions. The following regions are required:
@@ -59,6 +66,10 @@ Required properties:
59 * "iface_clk" 66 * "iface_clk"
60- vddio-supply: phandle to vdd-io regulator device node 67- vddio-supply: phandle to vdd-io regulator device node
61 68
69Optional properties:
70- qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
71 regulator is wanted.
72
62Example: 73Example:
63 mdss_dsi0: qcom,mdss_dsi@fd922800 { 74 mdss_dsi0: qcom,mdss_dsi@fd922800 {
64 compatible = "qcom,mdss-dsi-ctrl"; 75 compatible = "qcom,mdss-dsi-ctrl";
@@ -90,9 +101,13 @@ Example:
 
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 
-		qcom,dual-panel-mode;
-		qcom,master-panel;
-		qcom,sync-dual-panel;
+		qcom,dual-dsi-mode;
+		qcom,master-dsi;
+		qcom,sync-dual-dsi;
+
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&mdss_dsi_active>;
+		pinctrl-1 = <&mdss_dsi_suspend>;
 
 		panel: panel@0 {
 			compatible = "sharp,lq101r1sx01";
@@ -101,6 +116,18 @@ Example:
 
 			power-supply = <...>;
 			backlight = <...>;
+
+			port {
+				panel_in: endpoint {
+					remote-endpoint = <&dsi0_out>;
+				};
+			};
+		};
+
+		port {
+			dsi0_out: endpoint {
+				remote-endpoint = <&panel_in>;
+			};
 		};
 	};
 
@@ -117,4 +144,6 @@ Example:
 		clock-names = "iface_clk";
 		clocks = <&mmcc MDSS_AHB_CLK>;
 		vddio-supply = <&pma8084_l12>;
+
+		qcom,dsi-phy-regulator-ldo-mode;
 	};
diff --git a/Documentation/devicetree/bindings/drm/msm/hdmi.txt b/Documentation/devicetree/bindings/drm/msm/hdmi.txt
index c43aa53debed..e926239e1101 100644
--- a/Documentation/devicetree/bindings/drm/msm/hdmi.txt
+++ b/Documentation/devicetree/bindings/drm/msm/hdmi.txt
@@ -2,8 +2,9 @@ Qualcomm adreno/snapdragon hdmi output
 
 Required properties:
 - compatible: one of the following
+   * "qcom,hdmi-tx-8994"
    * "qcom,hdmi-tx-8084"
-   * "qcom,hdmi-tx-8074"
+   * "qcom,hdmi-tx-8974"
    * "qcom,hdmi-tx-8660"
    * "qcom,hdmi-tx-8960"
 - reg: Physical base address and length of the controller's registers
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
index 009f4bfa1590..e685610d38e2 100644
--- a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -197,9 +197,11 @@ of the following host1x client modules:
 - sor: serial output resource
 
   Required properties:
-  - compatible: For Tegra124, must contain "nvidia,tegra124-sor".  Otherwise,
-    must contain '"nvidia,<chip>-sor", "nvidia,tegra124-sor"', where <chip>
-    is tegra132.
+  - compatible: Should be:
+    - "nvidia,tegra124-sor": for Tegra124 and Tegra132
+    - "nvidia,tegra132-sor": for Tegra132
+    - "nvidia,tegra210-sor": for Tegra210
+    - "nvidia,tegra210-sor1": for Tegra210
   - reg: Physical base address and length of the controller's registers.
   - interrupts: The interrupt outputs from the controller.
   - clocks: Must contain an entry for each entry in clock-names.
diff --git a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
index 6b1d75f1a529..a36dfce0032e 100644
--- a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+++ b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
@@ -52,10 +52,9 @@ STMicroelectronics stih4xx platforms
   See ../reset/reset.txt for details.
 - reset-names: names of the resets listed in resets property in the same
   order.
-- ranges: to allow probing of subdevices
 
 - sti-hdmi: hdmi output block
-  must be a child of sti-tvout
+  must be a child of sti-display-subsystem
   Required properties:
   - compatible: "st,stih<chip>-hdmi";
   - reg: Physical base address of the IP registers and length of memory mapped region.
@@ -72,7 +71,7 @@ STMicroelectronics stih4xx platforms
 
 sti-hda:
   Required properties:
-  must be a child of sti-tvout
+  must be a child of sti-display-subsystem
   - compatible: "st,stih<chip>-hda"
   - reg: Physical base address of the IP registers and length of memory mapped region.
   - reg-names: names of the mapped memory regions listed in regs property in
@@ -85,7 +84,7 @@ sti-hda:
 
 sti-dvo:
   Required properties:
-  must be a child of sti-tvout
+  must be a child of sti-display-subsystem
   - compatible: "st,stih<chip>-dvo"
   - reg: Physical base address of the IP registers and length of memory mapped region.
   - reg-names: names of the mapped memory regions listed in regs property in
@@ -195,38 +194,37 @@ Example:
 		reg-names = "tvout-reg", "hda-reg", "syscfg";
 		reset-names = "tvout";
 		resets = <&softreset STIH416_HDTVOUT_SOFTRESET>;
-		ranges;
+	};
 
 	sti-hdmi@fe85c000 {
 		compatible = "st,stih416-hdmi";
 		reg = <0xfe85c000 0x1000>, <0xfe830000 0x10000>;
 		reg-names = "hdmi-reg", "syscfg";
 		interrupts = <GIC_SPI 173 IRQ_TYPE_NONE>;
 		interrupt-names = "irq";
 		clock-names = "pix", "tmds", "phy", "audio";
 		clocks = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>;
 	};
 
 	sti-hda@fe85a000 {
 		compatible = "st,stih416-hda";
 		reg = <0xfe85a000 0x400>, <0xfe83085c 0x4>;
 		reg-names = "hda-reg", "video-dacs-ctrl";
 		clock-names = "pix", "hddac";
 		clocks = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
 	};
 
 	sti-dvo@8d00400 {
 		compatible = "st,stih407-dvo";
 		reg = <0x8d00400 0x200>;
 		reg-names = "dvo-reg";
 		clock-names = "dvo_pix", "dvo",
 			      "main_parent", "aux_parent";
 		clocks = <&clk_s_d2_flexgen CLK_PIX_DVO>, <&clk_s_d2_flexgen CLK_DVO>,
 			 <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_dvo>;
 		sti,panel = <&panel_dvo>;
-	};
 	};
 
 	sti-hqvdp@9c000000 {
@@ -237,7 +235,7 @@ Example:
 		reset-names = "hqvdp";
 		resets = <&softreset STIH407_HDQVDP_SOFTRESET>;
 		st,vtg = <&vtg_main>;
-		};
+	};
 	};
 	...
 };
diff --git a/Documentation/devicetree/bindings/panel/auo,b080uan01.txt b/Documentation/devicetree/bindings/panel/auo,b080uan01.txt
new file mode 100644
index 000000000000..bae0e2b51467
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/auo,b080uan01.txt
@@ -0,0 +1,7 @@
+AU Optronics Corporation 8.0" WUXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,b101ean01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/lg,lg4573.txt b/Documentation/devicetree/bindings/panel/lg,lg4573.txt
new file mode 100644
index 000000000000..824441f4e95a
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/lg,lg4573.txt
@@ -0,0 +1,19 @@
+LG LG4573 TFT Liquid Crystal Display with SPI control bus
+
+Required properties:
+  - compatible: "lg,lg4573"
+  - reg: address of the panel on the SPI bus
+
+The panel must obey rules for SPI slave device specified in document [1].
+
+[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+
+	lcd_panel: display@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "lg,lg4573";
+		spi-max-frequency = <10000000>;
+		reg = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/panel/nec,nl4827hc19-05b.txt b/Documentation/devicetree/bindings/panel/nec,nl4827hc19-05b.txt
new file mode 100644
index 000000000000..8e1914d1edb8
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/nec,nl4827hc19-05b.txt
@@ -0,0 +1,7 @@
+NEC LCD Technologies,Ltd. WQVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "nec,nl4827hc19-05b"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/okaya,rs800480t-7x0gp.txt b/Documentation/devicetree/bindings/panel/okaya,rs800480t-7x0gp.txt
new file mode 100644
index 000000000000..ddf8e211d382
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/okaya,rs800480t-7x0gp.txt
@@ -0,0 +1,7 @@
+OKAYA Electric America, Inc. RS800480T-7X0GP 7" WVGA LCD panel
+
+Required properties:
+- compatible: should be "okaya,rs800480t-7x0gp"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index d444757c4d9e..76228e3e0c74 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -140,6 +140,7 @@ mundoreader Mundo Reader S.L.
 murata	Murata Manufacturing Co., Ltd.
 mxicy	Macronix International Co., Ltd.
 national	National Semiconductor
+nec	NEC LCD Technologies, Ltd.
 neonode		Neonode Inc.
 netgear	NETGEAR
 netlogic	Broadcom Corporation (formerly NetLogic Microsystems)
@@ -148,6 +149,7 @@ nintendo Nintendo
 nokia	Nokia
 nvidia	NVIDIA
 nxp	NXP Semiconductors
+okaya	Okaya Electric America, Inc.
 onnn	ON Semiconductor Corp.
 opencores	OpenCores.org
 ortustech	Ortus Technology Co., Ltd.
diff --git a/Documentation/devicetree/bindings/video/fsl,dcu.txt b/Documentation/devicetree/bindings/video/fsl,dcu.txt
new file mode 100644
index 000000000000..ebf1be9ae393
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/fsl,dcu.txt
@@ -0,0 +1,22 @@
+Device Tree bindings for Freescale DCU DRM Driver
+
+Required properties:
+- compatible: Should be one of
+	* "fsl,ls1021a-dcu".
+	* "fsl,vf610-dcu".
+
+- reg: Address and length of the register set for dcu.
+- clocks: From common clock binding: handle to dcu clock.
+- clock-names: From common clock binding: Shall be "dcu".
+- big-endian Boolean property, LS1021A DCU registers are big-endian.
+- fsl,panel: The phandle to panel node.
+
+Examples:
+dcu: dcu@2ce0000 {
+	compatible = "fsl,ls1021a-dcu";
+	reg = <0x0 0x2ce0000 0x0 0x10000>;
+	clocks = <&platform_clk 0>;
+	clock-names = "dcu";
+	big-endian;
+	fsl,panel = <&panel>;
+};
diff --git a/MAINTAINERS b/MAINTAINERS
index 94212186990d..740a13b43e8a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3555,6 +3555,15 @@ F: drivers/gpu/drm/exynos/
 F:	include/drm/exynos*
 F:	include/uapi/drm/exynos*
 
+DRM DRIVERS FOR FREESCALE DCU
+M:	Jianwei Wang <jianwei.wang.chn@gmail.com>
+M:	Alison Wang <alison.wang@freescale.com>
+L:	dri-devel@lists.freedesktop.org
+S:	Supported
+F:	drivers/gpu/drm/fsl-dcu/
+F:	Documentation/devicetree/bindings/video/fsl,dcu.txt
+F:	Documentation/devicetree/bindings/panel/nec,nl4827hc19_05b.txt
+
 DRM DRIVERS FOR FREESCALE IMX
 M:	Philipp Zabel <p.zabel@pengutronix.de>
 L:	dri-devel@lists.freedesktop.org
@@ -3592,6 +3601,15 @@ S: Maintained
 F:	drivers/gpu/drm/rockchip/
 F:	Documentation/devicetree/bindings/video/rockchip*
 
+DRM DRIVERS FOR STI
+M:	Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M:	Vincent Abriou <vincent.abriou@st.com>
+L:	dri-devel@lists.freedesktop.org
+T:	git http://git.linaro.org/people/benjamin.gaignard/kernel.git
+S:	Maintained
+F:	drivers/gpu/drm/sti
+F:	Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+
 DSBR100 USB FM RADIO DRIVER
 M:	Alexey Klimov <klimov.linux@gmail.com>
 L:	linux-media@vger.kernel.org
diff --git a/Makefile b/Makefile
index 35b4c196c171..246053f04fb5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc8
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 07ab3d203916..7451b447cc2d 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -312,6 +312,9 @@ INSTALL_TARGETS = zinstall uinstall install
 
 PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
 
+bootpImage uImage: zImage
+zImage: Image
+
 $(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 4a0718ccf68e..1e29ccf77ea2 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -116,7 +116,7 @@
 			ranges = <0 0x2000 0x2000>;
 
 			scm_conf: scm_conf@0 {
-				compatible = "syscon";
+				compatible = "syscon", "simple-bus";
 				reg = <0x0 0x1400>;
 				#address-cells = <1>;
 				#size-cells = <1>;
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index e6d13592080d..b57033e8c633 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -181,10 +181,10 @@
 			interrupt-names = "msi";
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0x7>;
-			interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-			                <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
-			                <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
-			                <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+			                <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+			                <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+			                <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
 			         <&clks IMX6QDL_CLK_LVDS1_GATE>,
 			         <&clks IMX6QDL_CLK_PCIE_REF_125M>;
diff --git a/arch/arm/boot/dts/k2e.dtsi b/arch/arm/boot/dts/k2e.dtsi
index 1b6494fbdb91..675fb8e492c6 100644
--- a/arch/arm/boot/dts/k2e.dtsi
+++ b/arch/arm/boot/dts/k2e.dtsi
@@ -131,10 +131,17 @@
 				<GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
 			};
 		};
+
+		mdio: mdio@24200f00 {
+			compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x24200f00 0x100>;
+			status = "disabled";
+			clocks = <&clkcpgmac>;
+			clock-names = "fck";
+			bus_freq = <2500000>;
+		};
 		/include/ "k2e-netcp.dtsi"
 	};
 };
-
-&mdio {
-	reg = <0x24200f00 0x100>;
-};
diff --git a/arch/arm/boot/dts/k2hk.dtsi b/arch/arm/boot/dts/k2hk.dtsi
index ae6472407b22..d0810a5f2968 100644
--- a/arch/arm/boot/dts/k2hk.dtsi
+++ b/arch/arm/boot/dts/k2hk.dtsi
@@ -98,6 +98,17 @@
 			#gpio-cells = <2>;
 			gpio,syscon-dev = <&devctrl 0x25c>;
 		};
+
+		mdio: mdio@02090300 {
+			compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x02090300 0x100>;
+			status = "disabled";
+			clocks = <&clkcpgmac>;
+			clock-names = "fck";
+			bus_freq = <2500000>;
+		};
 		/include/ "k2hk-netcp.dtsi"
 	};
 };
diff --git a/arch/arm/boot/dts/k2l.dtsi b/arch/arm/boot/dts/k2l.dtsi
index 0e007483615e..49fd414f680c 100644
--- a/arch/arm/boot/dts/k2l.dtsi
+++ b/arch/arm/boot/dts/k2l.dtsi
@@ -29,7 +29,6 @@
 	};
 
 	soc {
-
 		/include/ "k2l-clocks.dtsi"
 
 		uart2: serial@02348400 {
@@ -79,6 +78,17 @@
 			#gpio-cells = <2>;
 			gpio,syscon-dev = <&devctrl 0x24c>;
 		};
+
+		mdio: mdio@26200f00 {
+			compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x26200f00 0x100>;
+			status = "disabled";
+			clocks = <&clkcpgmac>;
+			clock-names = "fck";
+			bus_freq = <2500000>;
+		};
 		/include/ "k2l-netcp.dtsi"
 	};
 };
@@ -96,7 +106,3 @@
 	/* Pin muxed. Enabled and configured by Bootloader */
 	status = "disabled";
 };
-
-&mdio {
-	reg = <0x26200f00 0x100>;
-};
diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi
index e7a6f6deabb6..72816d65f7ec 100644
--- a/arch/arm/boot/dts/keystone.dtsi
+++ b/arch/arm/boot/dts/keystone.dtsi
@@ -267,17 +267,6 @@
 			  1 0 0x21000A00 0x00000100>;
 		};
 
-		mdio: mdio@02090300 {
-			compatible = "ti,keystone_mdio", "ti,davinci_mdio";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <0x02090300 0x100>;
-			status = "disabled";
-			clocks = <&clkpa>;
-			clock-names = "fck";
-			bus_freq = <2500000>;
-		};
-
 		kirq0: keystone_irq@26202a0 {
 			compatible = "ti,keystone-irq";
 			interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 11a7963be003..2390f387c271 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -51,7 +51,8 @@
 		};
 
 		scm_conf: scm_conf@270 {
-			compatible = "syscon";
+			compatible = "syscon",
+				     "simple-bus";
 			reg = <0x270 0x240>;
 			#address-cells = <1>;
 			#size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 7d31c6ff246f..abc4473e6f8a 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -191,7 +191,8 @@
 	};

 	omap4_padconf_global: omap4_padconf_global@5a0 {
-		compatible = "syscon";
+		compatible = "syscon",
+			     "simple-bus";
 		reg = <0x5a0 0x170>;
 		#address-cells = <1>;
 		#size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index c8fd648a7108..b1a1263e6001 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -180,7 +180,8 @@
 	};

 	omap5_padconf_global: omap5_padconf_global@5a0 {
-		compatible = "syscon";
+		compatible = "syscon",
+			     "simple-bus";
 		reg = <0x5a0 0xec>;
 		#address-cells = <1>;
 		#size-cells = <1>;
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index a75f3289e653..b8f81fb418ce 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -15,6 +15,33 @@
 #include "skeleton.dtsi"

 / {
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		enable-method = "ste,dbx500-smp";
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+				core1 {
+					cpu = <&CPU1>;
+				};
+			};
+		};
+		CPU0: cpu@300 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0x300>;
+		};
+		CPU1: cpu@301 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0x301>;
+		};
+	};
+
 	soc {
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -22,32 +49,6 @@
 		interrupt-parent = <&intc>;
 		ranges;

-		cpus {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			cpu-map {
-				cluster0 {
-					core0 {
-						cpu = <&CPU0>;
-					};
-					core1 {
-						cpu = <&CPU1>;
-					};
-				};
-			};
-			CPU0: cpu@0 {
-				device_type = "cpu";
-				compatible = "arm,cortex-a9";
-				reg = <0>;
-			};
-			CPU1: cpu@1 {
-				device_type = "cpu";
-				compatible = "arm,cortex-a9";
-				reg = <1>;
-			};
-		};
-
 		ptm@801ae000 {
 			compatible = "arm,coresight-etm3x", "arm,primecell";
 			reg = <0x801ae000 0x1000>;
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 9504e7790288..3eaf8fbaf603 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -124,14 +124,14 @@ CONFIG_REGULATOR_S2MPS11=y
 CONFIG_REGULATOR_S5M8767=y
 CONFIG_REGULATOR_TPS65090=y
 CONFIG_DRM=y
-CONFIG_DRM_PTN3460=y
-CONFIG_DRM_PS8622=y
+CONFIG_DRM_NXP_PTN3460=y
+CONFIG_DRM_PARADE_PS8622=y
 CONFIG_DRM_EXYNOS=y
 CONFIG_DRM_EXYNOS_FIMD=y
 CONFIG_DRM_EXYNOS_DSI=y
 CONFIG_DRM_EXYNOS_HDMI=y
 CONFIG_DRM_PANEL_SIMPLE=y
-CONFIG_DRM_PANEL_S6E8AA0=y
+CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=y
 CONFIG_FB_SIMPLE=y
 CONFIG_EXYNOS_VIDEO=y
 CONFIG_EXYNOS_MIPI_DSI=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 5fd8df6f50ea..6413390212fe 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -429,15 +429,15 @@ CONFIG_VIDEO_RENESAS_VSP1=m
 CONFIG_VIDEO_ADV7180=m
 CONFIG_VIDEO_ML86V7667=m
 CONFIG_DRM=y
-CONFIG_DRM_PTN3460=m
-CONFIG_DRM_PS8622=m
+CONFIG_DRM_NXP_PTN3460=m
+CONFIG_DRM_PARADE_PS8622=m
 CONFIG_DRM_EXYNOS=m
 CONFIG_DRM_EXYNOS_DSI=y
 CONFIG_DRM_EXYNOS_FIMD=y
 CONFIG_DRM_EXYNOS_HDMI=y
 CONFIG_DRM_RCAR_DU=m
 CONFIG_DRM_TEGRA=y
-CONFIG_DRM_PANEL_S6E8AA0=m
+CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_FB_ARMCLCD=y
 CONFIG_FB_WM8505=y
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 92828a1dec80..b48dd4f37f80 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -61,6 +61,7 @@ work_pending:
 	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
 	ldmia	sp, {r0 - r6}		@ have to reload r0 - r6
 	b	local_restart		@ ... and off we go
+ENDPROC(ret_fast_syscall)

 /*
  * "slow" syscall return path.  "why" tells us if this was a real syscall.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index bd755d97e459..29e2991465cb 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
 	sub	lr, r4, r5			@ mmu has been enabled
 	add	r3, r7, lr
 	ldrd	r4, [r3, #0]			@ get secondary_data.pgdir
+ARM_BE8(eor	r4, r4, r5)			@ Swap r5 and r4 in BE:
+ARM_BE8(eor	r5, r4, r5)			@ it can be done in 3 steps
+ARM_BE8(eor	r4, r4, r5)			@ without using a temp reg.
 	ldr	r8, [r3, #8]			@ get secondary_data.swapper_pg_dir
 	badr	lr, __enable_mmu		@ return address
 	mov	r13, r12			@ __secondary_switched address
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index efe17dd9b921..54a5aeab988d 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
  */
 void update_vsyscall(struct timekeeper *tk)
 {
-	struct timespec xtime_coarse;
 	struct timespec64 *wtm = &tk->wall_to_monotonic;

 	if (!cntvct_ok) {
@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)

 	vdso_write_begin(vdso_data);

-	xtime_coarse = __current_kernel_time();
 	vdso_data->tk_is_cntvct		= tk_is_cntvct(tk);
-	vdso_data->xtime_coarse_sec	= xtime_coarse.tv_sec;
-	vdso_data->xtime_coarse_nsec	= xtime_coarse.tv_nsec;
+	vdso_data->xtime_coarse_sec	= tk->xtime_sec;
+	vdso_data->xtime_coarse_nsec	= (u32)(tk->tkr_mono.xtime_nsec >>
+						tk->tkr_mono.shift);
 	vdso_data->wtm_clock_sec	= wtm->tv_sec;
 	vdso_data->wtm_clock_nsec	= wtm->tv_nsec;

diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 3e58d710013c..4b39af2dfda9 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 	}

 	/* the mmap semaphore is taken only if not in an atomic context */
-	atomic = in_atomic();
+	atomic = faulthandler_disabled();

 	if (!atomic)
 		down_read(&current->mm->mmap_sem);
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 6001f1c9d136..4a87e86dec45 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -146,9 +146,8 @@ static __init int exynos4_pm_init_power_domain(void)
 		pd->base = of_iomap(np, 0);
 		if (!pd->base) {
 			pr_warn("%s: failed to map memory\n", __func__);
-			kfree(pd->pd.name);
+			kfree_const(pd->pd.name);
 			kfree(pd);
-			of_node_put(np);
 			continue;
 		}

diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 8e52621b5a6b..e1d2e991d17a 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
 	.irq_mask		= wakeupgen_mask,
 	.irq_unmask		= wakeupgen_unmask,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_set_type		= irq_chip_set_type_parent,
 	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 9d259d94e429..1160434eece0 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
 VDSO_LDFLAGS += -nostdlib -shared
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)

 obj-$(CONFIG_VDSO) += vdso.o
 extra-$(CONFIG_VDSO) += vdso.lds
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index ec37ab3f524f..97bc68f4c689 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -199,16 +199,15 @@ up_fail:
  */
 void update_vsyscall(struct timekeeper *tk)
 {
-	struct timespec xtime_coarse;
 	u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");

 	++vdso_data->tb_seq_count;
 	smp_wmb();

-	xtime_coarse = __current_kernel_time();
 	vdso_data->use_syscall			= use_syscall;
-	vdso_data->xtime_coarse_sec		= xtime_coarse.tv_sec;
-	vdso_data->xtime_coarse_nsec		= xtime_coarse.tv_nsec;
+	vdso_data->xtime_coarse_sec		= tk->xtime_sec;
+	vdso_data->xtime_coarse_nsec		= tk->tkr_mono.xtime_nsec >>
+							tk->tkr_mono.shift;
 	vdso_data->wtm_clock_sec		= tk->wall_to_monotonic.tv_sec;
 	vdso_data->wtm_clock_nsec		= tk->wall_to_monotonic.tv_nsec;

diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index af42e7003f12..baa7b6fc0a60 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
 	.set	noat
 	SAVE_ALL
 	FEXPORT(handle_\exception\ext)
-	__BUILD_clear_\clear
+	__build_clear_\clear
 	.set	at
 	__BUILD_\verbose \exception
 	move	a0, sp
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index ad4d44635c76..a6f6b762c47a 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -80,7 +80,7 @@ syscall_trace_entry:
 	SAVE_STATIC
 	move	s0, t2
 	move	a0, sp
-	daddiu	a1, v0, __NR_64_Linux
+	move	a1, v0
 	jal	syscall_trace_enter

 	bltz	v0, 2f			# seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 446cc654da56..4b2010654c46 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
 	SAVE_STATIC
 	move	s0, t2
 	move	a0, sp
-	daddiu	a1, v0, __NR_N32_Linux
+	move	a1, v0
 	jal	syscall_trace_enter

 	bltz	v0, 2f			# seccomp failed? Skip syscall
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 5a1844765a7a..a7e257d9cb90 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -140,6 +140,7 @@ sysexit_from_sys_call:
 	 */
 	andl	$~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	movl	RIP(%rsp), %ecx		/* User %eip */
+	movq	RAX(%rsp), %rax
 	RESTORE_RSI_RDI
 	xorl	%edx, %edx		/* Do not leak kernel information */
 	xorq	%r8, %r8
@@ -219,7 +220,6 @@ sysexit_from_sys_call:
 1:	setbe	%al			/* 1 if error, 0 if not */
 	movzbl	%al, %edi		/* zero-extend that into %edi */
 	call	__audit_syscall_exit
-	movq	RAX(%rsp), %rax		/* reload syscall return value */
 	movl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
@@ -368,6 +368,7 @@ sysretl_from_sys_call:
 	RESTORE_RSI_RDI_RDX
 	movl	RIP(%rsp), %ecx
 	movl	EFLAGS(%rsp), %r11d
+	movq	RAX(%rsp), %rax
 	xorq	%r10, %r10
 	xorq	%r9, %r9
 	xorq	%r8, %r8
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 6fe6b182c998..9dfce4e0417d 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -57,9 +57,9 @@ struct sigcontext {
 	unsigned long ip;
 	unsigned long flags;
 	unsigned short cs;
-	unsigned short __pad2;		/* Was called gs, but was always zero. */
-	unsigned short __pad1;		/* Was called fs, but was always zero. */
-	unsigned short ss;
+	unsigned short gs;
+	unsigned short fs;
+	unsigned short __pad0;
 	unsigned long err;
 	unsigned long trapno;
 	unsigned long oldmask;
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 751bf4b7bf11..d7f3b3b78ac3 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -79,12 +79,12 @@ do { \
 #else /* CONFIG_X86_32 */

 /* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
+#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

 #define __EXTRA_CLOBBER  \
 	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-	  "r12", "r13", "r14", "r15", "flags"
+	  "r12", "r13", "r14", "r15"

 #ifdef CONFIG_CC_STACKPROTECTOR
 #define __switch_canary
@@ -100,11 +100,7 @@ do { \
 #define __switch_canary_iparam
 #endif	/* CC_STACKPROTECTOR */

-/*
- * There is no need to save or restore flags, because flags are always
- * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
- * has no effect.
- */
+/* Save restore flags to clear handle leaking NT */
 #define switch_to(prev, next, last) \
 	asm volatile(SAVE_CONTEXT \
 	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
index 0e8a973de9ee..40836a9a7250 100644
--- a/arch/x86/include/uapi/asm/sigcontext.h
+++ b/arch/x86/include/uapi/asm/sigcontext.h
@@ -177,24 +177,9 @@ struct sigcontext {
 	__u64 rip;
 	__u64 eflags;		/* RFLAGS */
 	__u16 cs;
-
-	/*
-	 * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
-	 * Linux saved and restored fs and gs in these slots.  This
-	 * was counterproductive, as fsbase and gsbase were never
-	 * saved, so arch_prctl was presumably unreliable.
-	 *
-	 * If these slots are ever needed for any other purpose, there
-	 * is some risk that very old 64-bit binaries could get
-	 * confused.  I doubt that many such binaries still work,
-	 * though, since the same patch in 2.5.64 also removed the
-	 * 64-bit set_thread_area syscall, so it appears that there is
-	 * no TLS API that works in both pre- and post-2.5.64 kernels.
-	 */
-	__u16 __pad2;		/* Was gs. */
-	__u16 __pad1;		/* Was fs. */
-
-	__u16 ss;
+	__u16 gs;
+	__u16 fs;
+	__u16 __pad0;
 	__u64 err;
 	__u64 trapno;
 	__u64 oldmask;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index f813261d9740..2683f36e4e0a 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -322,7 +322,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 		irq_data->chip = &lapic_controller;
 		irq_data->chip_data = data;
 		irq_data->hwirq = virq + i;
-		err = assign_irq_vector_policy(virq, irq_data->node, data,
+		err = assign_irq_vector_policy(virq + i, irq_data->node, data,
 					       info);
 		if (err)
 			goto error;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index b9826a981fb2..6326ae24e4d5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2534,7 +2534,7 @@ static int intel_pmu_cpu_prepare(int cpu)
 	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
 		cpuc->shared_regs = allocate_shared_regs(cpu);
 		if (!cpuc->shared_regs)
-			return NOTIFY_BAD;
+			goto err;
 	}

 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2542,18 +2542,27 @@ static int intel_pmu_cpu_prepare(int cpu)

 		cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
 		if (!cpuc->constraint_list)
-			return NOTIFY_BAD;
+			goto err_shared_regs;

 		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
-		if (!cpuc->excl_cntrs) {
-			kfree(cpuc->constraint_list);
-			kfree(cpuc->shared_regs);
-			return NOTIFY_BAD;
-		}
+		if (!cpuc->excl_cntrs)
+			goto err_constraint_list;
+
 		cpuc->excl_thread_id = 0;
 	}

 	return NOTIFY_OK;
+
+err_constraint_list:
+	kfree(cpuc->constraint_list);
+	cpuc->constraint_list = NULL;
+
+err_shared_regs:
+	kfree(cpuc->shared_regs);
+	cpuc->shared_regs = NULL;
+
+err:
+	return NOTIFY_BAD;
 }

 static void intel_pmu_cpu_starting(int cpu)
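The restructuring above converts ad-hoc cleanup into the kernel's usual goto-unwind pattern: each allocation failure jumps to a label that frees everything acquired so far, in reverse order, and NULLs the pointers so a later teardown cannot double-free. A minimal standalone sketch of the pattern, with invented names:

#include <stdlib.h>

struct ctx { char *a; char *b; };

static int ctx_init(struct ctx *c)
{
	c->a = malloc(64);
	if (!c->a)
		goto err;

	c->b = malloc(64);
	if (!c->b)
		goto err_a;

	return 0;

err_a:
	free(c->a);
	c->a = NULL;	/* leave the struct safe to tear down again */
err:
	return -1;
}

int main(void)
{
	struct ctx c = {0};
	if (ctx_init(&c))
		return 1;
	free(c.a);
	free(c.b);
	return 0;
}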
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 63eb68b73589..377e8f8ed391 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -1255,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
 	cpumask_set_cpu(cpu, &cqm_cpumask);
 }

-static void intel_cqm_cpu_prepare(unsigned int cpu)
+static void intel_cqm_cpu_starting(unsigned int cpu)
 {
 	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1296,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
 	unsigned int cpu  = (unsigned long)hcpu;

 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		intel_cqm_cpu_prepare(cpu);
-		break;
 	case CPU_DOWN_PREPARE:
 		intel_cqm_cpu_exit(cpu);
 		break;
 	case CPU_STARTING:
+		intel_cqm_cpu_starting(cpu);
 		cqm_pick_event_reader(cpu);
 		break;
 	}
@@ -1373,7 +1371,7 @@ static int __init intel_cqm_init(void)
 		goto out;

 	for_each_online_cpu(i) {
-		intel_cqm_cpu_prepare(i);
+		intel_cqm_cpu_starting(i);
 		cqm_pick_event_reader(i);
 	}

diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 79de954626fd..d25097c3fc1d 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -270,7 +270,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 	dst_fpu->fpregs_active = 0;
 	dst_fpu->last_cpu = -1;

-	if (src_fpu->fpstate_active)
+	if (src_fpu->fpstate_active && cpu_has_fpu)
 		fpu_copy(dst_fpu, src_fpu);

 	return 0;
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 1e173f6285c7..d14e9ac3235a 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -40,7 +40,12 @@ static void fpu__init_cpu_generic(void)
 	write_cr0(cr0);

 	/* Flush out any pending x87 state: */
-	asm volatile ("fninit");
+#ifdef CONFIG_MATH_EMULATION
+	if (!cpu_has_fpu)
+		fpstate_init_soft(&current->thread.fpu.state.soft);
+	else
+#endif
+		asm volatile ("fninit");
 }

 /*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 397688beed4b..c27cad726765 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -408,6 +408,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 static void mwait_idle(void)
 {
 	if (!current_set_polling_and_test()) {
+		trace_cpu_idle_rcuidle(1, smp_processor_id());
 		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
 			smp_mb(); /* quirk */
 			clflush((void *)&current_thread_info()->flags);
@@ -419,6 +420,7 @@ static void mwait_idle(void)
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 	}
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 206996c1669d..71820c42b6ce 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 	COPY(r15);
 #endif /* CONFIG_X86_64 */

+#ifdef CONFIG_X86_32
 	COPY_SEG_CPL3(cs);
 	COPY_SEG_CPL3(ss);
+#else /* !CONFIG_X86_32 */
+	/* Kernel saves and restores only the CS segment register on signals,
+	 * which is the bare minimum needed to allow mixed 32/64-bit code.
+	 * App's signal handler can save/restore other segments if needed. */
+	COPY_SEG_CPL3(cs);
+#endif /* CONFIG_X86_32 */

 	get_user_ex(tmpflags, &sc->flags);
 	regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
 #else /* !CONFIG_X86_32 */
 	put_user_ex(regs->flags, &sc->flags);
 	put_user_ex(regs->cs, &sc->cs);
-	put_user_ex(0, &sc->__pad2);
-	put_user_ex(0, &sc->__pad1);
-	put_user_ex(regs->ss, &sc->ss);
+	put_user_ex(0, &sc->gs);
+	put_user_ex(0, &sc->fs);
 #endif /* CONFIG_X86_32 */

 	put_user_ex(fpstate, &sc->fpstate);
@@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,

 	regs->sp = (unsigned long)frame;

-	/*
-	 * Set up the CS and SS registers to run signal handlers in
-	 * 64-bit mode, even if the handler happens to be interrupting
-	 * 32-bit or 16-bit code.
-	 *
-	 * SS is subtle.  In 64-bit mode, we don't need any particular
-	 * SS descriptor, but we do need SS to be valid.  It's possible
-	 * that the old SS is entirely bogus -- this can happen if the
-	 * signal we're trying to deliver is #GP or #SS caused by a bad
-	 * SS value.
-	 */
+	/* Set up the CS register to run signal handlers in 64-bit mode,
+	   even if the handler happens to be interrupting 32-bit code. */
 	regs->cs = __USER_CS;
-	regs->ss = __USER_DS;

 	return 0;
 }
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 6273324186ac..0ccb53a9fcd9 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -28,11 +28,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
 	struct desc_struct *desc;
 	unsigned long base;

-	seg &= ~7UL;
+	seg >>= 3;

 	mutex_lock(&child->mm->context.lock);
 	if (unlikely(!child->mm->context.ldt ||
-		     (seg >> 3) >= child->mm->context.ldt->size))
+		     seg >= child->mm->context.ldt->size))
 		addr = -1L; /* bogus selector, access would fault */
 	else {
 		desc = &child->mm->context.ldt->entries[seg];
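The fix above matters because an x86 segment selector is not a plain index: bits 3..15 hold the table index, bit 2 selects GDT vs LDT, and bits 0..1 are the RPL. Masking with ~7 keeps the index scaled by 8, so subscripting the LDT entries array with it reads the wrong slot; shifting right by 3 yields the true index. A small standalone decoder for illustration:

#include <stdio.h>

struct selector {
	unsigned int index;	/* descriptor table index */
	unsigned int ti;	/* 0 = GDT, 1 = LDT */
	unsigned int rpl;	/* requested privilege level */
};

static struct selector decode(unsigned int sel)
{
	struct selector s = {
		.index = sel >> 3,
		.ti    = (sel >> 2) & 1,
		.rpl   = sel & 3,
	};
	return s;
}

int main(void)
{
	struct selector s = decode(0x2b);	/* e.g. Linux user data selector */
	printf("index=%u ti=%u rpl=%u\n", s.index, s.ti, s.rpl);
	return 0;
}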
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5ef2560075bf..8f0f6eca69da 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2105,7 +2105,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	if (guest_cpuid_has_tsc_adjust(vcpu)) {
 		if (!msr_info->host_initiated) {
 			s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
-			kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+			adjust_tsc_offset_guest(vcpu, adj);
 		}
 		vcpu->arch.ia32_tsc_adjust_msr = data;
 	}
@@ -6327,6 +6327,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 static void process_smi(struct kvm_vcpu *vcpu)
 {
 	struct kvm_segment cs, ds;
+	struct desc_ptr dt;
 	char buf[512];
 	u32 cr0;

@@ -6359,6 +6360,10 @@ static void process_smi(struct kvm_vcpu *vcpu)

 	kvm_x86_ops->set_cr4(vcpu, 0);

+	/* Undocumented: IDT limit is set to zero on entry to SMM. */
+	dt.address = dt.size = 0;
+	kvm_x86_ops->set_idt(vcpu, &dt);
+
 	__kvm_set_dr(vcpu, 7, DR7_FIXED_1);

 	cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index f37e84ab49f3..3d8f2e421466 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -29,7 +29,6 @@

 #include <asm/uaccess.h>
 #include <asm/traps.h>
-#include <asm/desc.h>
 #include <asm/user.h>
 #include <asm/fpu/internal.h>

@@ -181,7 +180,7 @@ void math_emulate(struct math_emu_info *info)
 		math_abort(FPU_info, SIGILL);
 	}

-	code_descriptor = LDT_DESCRIPTOR(FPU_CS);
+	code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
 	if (SEG_D_SIZE(code_descriptor)) {
 		/* The above test may be wrong, the book is not clear */
 		/* Segmented 32 bit protected mode */
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 9ccecb61a4fa..5e044d506b7a 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -16,9 +16,24 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>

-/* s is always from a cpu register, and the cpu does bounds checking
- * during register load --> no further bounds checks needed */
-#define LDT_DESCRIPTOR(s)	(((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
+#include <asm/desc.h>
+#include <asm/mmu_context.h>
+
+static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+{
+	static struct desc_struct zero_desc;
+	struct desc_struct ret = zero_desc;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+	seg >>= 3;
+	mutex_lock(&current->mm->context.lock);
+	if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+		ret = current->mm->context.ldt->entries[seg];
+	mutex_unlock(&current->mm->context.lock);
+#endif
+	return ret;
+}
+
 #define SEG_D_SIZE(x)		((x).b & (3 << 21))
 #define SEG_G_BIT(x)		((x).b & (1 << 23))
 #define SEG_GRANULARITY(x)	(((x).b & (1 << 23)) ? 4096 : 1)
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
index 6ef5e99380f9..8300db71c2a6 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -20,7 +20,6 @@
 #include <linux/stddef.h>

 #include <asm/uaccess.h>
-#include <asm/desc.h>

 #include "fpu_system.h"
 #include "exception.h"
@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
 		addr->selector = PM_REG_(segment);
 	}

-	descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
+	descriptor = FPU_get_ldt_descriptor(addr->selector);
 	base_address = SEG_BASE_ADDR(descriptor);
 	address = base_address + offset;
 	limit = base_address
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index e88fda867a33..484145368a24 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -8,7 +8,7 @@ config XEN
 	select PARAVIRT_CLOCK
 	select XEN_HAVE_PVMMU
 	depends on X86_64 || (X86_32 && X86_PAE)
-	depends on X86_TSC
+	depends on X86_LOCAL_APIC && X86_TSC
 	help
 	  This is the Linux Xen port.  Enabling this will allow the
 	  kernel to boot in a paravirtualized environment under the
@@ -17,7 +17,7 @@ config XEN
 config XEN_DOM0
 	def_bool y
 	depends on XEN && PCI_XEN && SWIOTLB_XEN
-	depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+	depends on X86_IO_APIC && ACPI && PCI

 config XEN_PVHVM
 	def_bool y
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 7322755f337a..4b6e29ac0968 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp)
 obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
 			time.o xen-asm.o xen-asm_$(BITS).o \
 			grant-table.o suspend.o platform-pci-unplug.o \
-			p2m.o
+			p2m.o apic.o

 obj-$(CONFIG_EVENT_TRACING) += trace.o

 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
-obj-$(CONFIG_XEN_DOM0)		+= apic.o vga.o
+obj-$(CONFIG_XEN_DOM0)		+= vga.o
 obj-$(CONFIG_SWIOTLB_XEN)	+= pci-swiotlb-xen.o
 obj-$(CONFIG_XEN_EFI)		+= efi.o
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index c20fe29e65f4..2292721b1d10 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -101,17 +101,15 @@ struct dom0_vga_console_info;

 #ifdef CONFIG_XEN_DOM0
 void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
-void __init xen_init_apic(void);
 #else
 static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
 				       size_t size)
 {
 }
-static inline void __init xen_init_apic(void)
-{
-}
 #endif

+void __init xen_init_apic(void);
+
 #ifdef CONFIG_XEN_EFI
 extern void xen_efi_init(void);
 #else
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 12600bfffca9..e0057d035200 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  * Description:
  *    Enables a low level driver to set a hard upper limit,
  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
- *    the device driver based upon the combined capabilities of I/O
- *    controller and storage device.
+ *    the device driver based upon the capabilities of the I/O
+ *    controller.
  *
  *    max_sectors is a soft limit imposed by the block layer for
  *    filesystem type requests.  This value can be overridden on a
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index a3da6770bc9e..b8efe36ce114 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -393,8 +393,6 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
 	struct scatterlist *cipher = areq_ctx->cipher;
 	struct scatterlist *hsg = areq_ctx->hsg;
 	struct scatterlist *tsg = areq_ctx->tsg;
-	struct scatterlist *assoc1;
-	struct scatterlist *assoc2;
 	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
 	unsigned int cryptlen = req->cryptlen;
 	struct page *dstp;
@@ -412,27 +410,19 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
 		cryptlen += ivsize;
 	}

-	if (sg_is_last(assoc))
-		return -EINVAL;
-
-	assoc1 = assoc + 1;
-	if (sg_is_last(assoc1))
-		return -EINVAL;
-
-	assoc2 = assoc + 2;
-	if (!sg_is_last(assoc2))
+	if (assoc->length < 12)
 		return -EINVAL;

 	sg_init_table(hsg, 2);
-	sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-	sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+	sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+	sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);

 	sg_init_table(tsg, 1);
-	sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+	sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);

 	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->headlen = assoc->length + assoc2->length;
-	areq_ctx->trailen = assoc1->length;
+	areq_ctx->headlen = 8;
+	areq_ctx->trailen = 4;
 	areq_ctx->sg = dst;

 	areq_ctx->complete = authenc_esn_geniv_ahash_done;
@@ -563,8 +553,6 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
 	struct scatterlist *cipher = areq_ctx->cipher;
 	struct scatterlist *hsg = areq_ctx->hsg;
 	struct scatterlist *tsg = areq_ctx->tsg;
-	struct scatterlist *assoc1;
-	struct scatterlist *assoc2;
 	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
 	struct page *srcp;
 	u8 *vsrc;
@@ -580,27 +568,19 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
 		cryptlen += ivsize;
 	}

-	if (sg_is_last(assoc))
-		return -EINVAL;
-
-	assoc1 = assoc + 1;
-	if (sg_is_last(assoc1))
-		return -EINVAL;
-
-	assoc2 = assoc + 2;
-	if (!sg_is_last(assoc2))
+	if (assoc->length < 12)
 		return -EINVAL;

 	sg_init_table(hsg, 2);
-	sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-	sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+	sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+	sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);

 	sg_init_table(tsg, 1);
-	sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+	sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);

 	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->headlen = assoc->length + assoc2->length;
-	areq_ctx->trailen = assoc1->length;
+	areq_ctx->headlen = 8;
+	areq_ctx->trailen = 4;
 	areq_ctx->sg = src;

 	areq_ctx->complete = authenc_esn_verify_ahash_done;
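Both hunks above replace the assumption of a three-entry scatterlist with a fixed split of one 12-byte associated-data buffer: bytes 0-3 and 8-11 are hashed as the head, bytes 4-7 as the trailer. A plain-memory sketch of the same split; the offsets follow the patch, while the field meanings (SPI, low and high sequence words) are my reading of the ESN layout, not stated in the diff:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 12-byte associated data: [0..3], [4..7], [8..11] */
	unsigned char assoc[12] = "ABCDEFGHIJKL";
	unsigned char head[8], trail[4];

	memcpy(head, assoc, 4);		/* hsg[0]: offset 0, 4 bytes */
	memcpy(head + 4, assoc + 8, 4);	/* hsg[1]: offset 8, 4 bytes */
	memcpy(trail, assoc + 4, 4);	/* tsg:    offset 4, 4 bytes */

	printf("head=%.8s trail=%.4s\n", head, trail);	/* ABCDIJKL EFGH */
	return 0;
}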
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 815f75ef2411..2922f1f252d5 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 #include <acpi/video.h>

 ACPI_MODULE_NAME("video");
@@ -41,6 +42,7 @@ void acpi_video_unregister_backlight(void);

 static bool backlight_notifier_registered;
 static struct notifier_block backlight_nb;
+static struct work_struct backlight_notify_work;

 static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
 static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@@ -262,6 +264,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 	{ },
 };

+/* This uses a workqueue to avoid various locking ordering issues */
+static void acpi_video_backlight_notify_work(struct work_struct *work)
+{
+	if (acpi_video_get_backlight_type() != acpi_backlight_video)
+		acpi_video_unregister_backlight();
+}
+
 static int acpi_video_backlight_notify(struct notifier_block *nb,
 				       unsigned long val, void *bd)
 {
@@ -269,9 +278,8 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,

 	/* A raw bl registering may change video -> native */
 	if (backlight->props.type == BACKLIGHT_RAW &&
-	    val == BACKLIGHT_REGISTERED &&
-	    acpi_video_get_backlight_type() != acpi_backlight_video)
-		acpi_video_unregister_backlight();
+	    val == BACKLIGHT_REGISTERED)
+		schedule_work(&backlight_notify_work);

 	return NOTIFY_OK;
 }
@@ -304,6 +312,8 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
 	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
 			    ACPI_UINT32_MAX, find_video, NULL,
 			    &video_caps, NULL);
+	INIT_WORK(&backlight_notify_work,
+		  acpi_video_backlight_notify_work);
 	backlight_nb.notifier_call = acpi_video_backlight_notify;
 	backlight_nb.priority = 0;
 	if (backlight_register_notifier(&backlight_nb) == 0)
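The driver change above is the standard cure for lock-ordering problems in notifier callbacks: the callback only schedules work, and the heavyweight unregister runs later in process context, outside any locks the notifier chain holds. A userspace analogue, with a thread standing in for the kernel workqueue (illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_t worker;

static void *do_work(void *arg)
{
	/* heavy work that may take locks runs here, not in the callback */
	puts("unregister backlight (deferred)");
	return NULL;
}

static void notifier_callback(void)
{
	/* cheap and lock-safe: just hand the work off */
	pthread_create(&worker, NULL, do_work, NULL);
}

int main(void)
{
	notifier_callback();
	pthread_join(worker, NULL);
	return 0;
}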
diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c
index ce1e3a885981..14b7305d2ba0 100644
--- a/drivers/ata/ahci_brcmstb.c
+++ b/drivers/ata/ahci_brcmstb.c
@@ -92,7 +92,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
  * Other architectures (e.g., ARM) either do not support big endian, or
  * else leave I/O in little endian mode.
  */
-	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 		return __raw_readl(addr);
 	else
 		return readl_relaxed(addr);
@@ -101,7 +101,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
 static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
 {
 	/* See brcm_sata_readreg() comments */
-	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 		__raw_writel(val, addr);
 	else
 		writel_relaxed(val, addr);
@@ -209,6 +209,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
 		  priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
 }

+#ifdef CONFIG_PM_SLEEP
 static int brcm_ahci_suspend(struct device *dev)
 {
 	struct ata_host *host = dev_get_drvdata(dev);
@@ -231,6 +232,7 @@ static int brcm_ahci_resume(struct device *dev)
 	brcm_sata_phys_enable(priv);
 	return ahci_platform_resume(dev);
 }
+#endif

 static struct scsi_host_template ahci_platform_sht = {
 	AHCI_SHT(DRV_NAME),
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index db5d9f79a247..19bcb80b2031 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
  * RETURNS:
  *	Block address read from @tf.
  */
-u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
 {
 	u64 block = 0;

-	if (!dev || tf->flags & ATA_TFLAG_LBA) {
+	if (tf->flags & ATA_TFLAG_LBA) {
 		if (tf->flags & ATA_TFLAG_LBA48) {
 			block |= (u64)tf->hob_lbah << 40;
 			block |= (u64)tf->hob_lbam << 32;
@@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
 	return 0;
 }

-static void ata_dev_config_sense_reporting(struct ata_device *dev)
-{
-	unsigned int err_mask;
-
-	if (!ata_id_has_sense_reporting(dev->id))
-		return;
-
-	if (ata_id_sense_reporting_enabled(dev->id))
-		return;
-
-	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
-	if (err_mask) {
-		ata_dev_dbg(dev,
-			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
-			    err_mask);
-	}
-}
-
 /**
  *	ata_dev_configure - Configure the specified ATA/ATAPI device
  *	@dev: Target device to configure
@@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
 				dev->devslp_timing[i] = sata_setting[j];
 		}
 	}
-	ata_dev_config_sense_reporting(dev);
+
 	dev->cdb_len = 16;
 	}

diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7465031a893c..cb0508af1459 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
 	tf->hob_lbah = buf[10];
 	tf->nsect = buf[12];
 	tf->hob_nsect = buf[13];
-	if (ata_id_has_ncq_autosense(dev->id))
-		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];

 	return 0;
 }
@@ -1630,70 +1628,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
 }

 /**
- *	ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- *	@dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
- *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
- *	@dfl_sense_key: default sense key to use
- *
- *	Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
- *	SENSE.  This function is EH helper.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep).
- *
- *	RETURNS:
- *	encoded sense data on success, 0 on failure or if sense data
- *	is not available.
- */
-static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
-				struct scsi_cmnd *cmd)
-{
-	struct ata_device *dev = qc->dev;
-	struct ata_taskfile tf;
-	unsigned int err_mask;
-
-	if (!cmd)
-		return 0;
-
-	DPRINTK("ATA request sense\n");
-	ata_dev_warn(dev, "request sense\n");
-	if (!ata_id_sense_reporting_enabled(dev->id)) {
-		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
-		return 0;
-	}
-	ata_tf_init(dev, &tf);
-
-	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
-	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
-	tf.command = ATA_CMD_REQ_SENSE_DATA;
-	tf.protocol = ATA_PROT_NODATA;
-
-	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
-	/*
-	 * ACS-4 states:
-	 * The device may set the SENSE DATA AVAILABLE bit to one in the
-	 * STATUS field and clear the ERROR bit to zero in the STATUS field
-	 * to indicate that the command returned completion without an error
-	 * and the sense data described in table 306 is available.
-	 *
-	 * IOW the 'ATA_SENSE' bit might not be set even though valid
-	 * sense data is available.
-	 * So check for both.
-	 */
-	if ((tf.command & ATA_SENSE) ||
-		tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
-		ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
-		qc->flags |= ATA_QCFLAG_SENSE_VALID;
-		ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
-			     tf.lbah, tf.lbam, tf.lbal);
-	} else {
-		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
-			     tf.command, err_mask);
-	}
-	return err_mask;
-}
-
-/**
  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
  *	@dev: device to perform REQUEST_SENSE to
  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
@@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
 	memcpy(&qc->result_tf, &tf, sizeof(tf));
 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
-	if (qc->result_tf.auxiliary) {
-		char sense_key, asc, ascq;
-
-		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
-		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
-		ascq = qc->result_tf.auxiliary & 0xff;
-		ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
-			sense_key, asc, ascq);
-		ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
-		ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
-		qc->flags |= ATA_QCFLAG_SENSE_VALID;
-	}
-
 	ehc->i.err_mask &= ~AC_ERR_DEV;
 }

@@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
 		return ATA_EH_RESET;
 	}

-	/*
-	 * Sense data reporting does not work if the
-	 * device fault bit is set.
-	 */
-	if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
-	    !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
-		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
-			tmp = ata_eh_request_sense(qc, qc->scsicmd);
-			if (tmp)
-				qc->err_mask |= tmp;
-			else
-				ata_scsi_set_sense_information(qc->scsicmd, tf);
-		} else {
-			ata_dev_warn(qc->dev, "sense data available but port frozen\n");
-		}
-	}
-
-	/* Set by NCQ autosense or request sense above */
-	if (qc->flags & ATA_QCFLAG_SENSE_VALID)
-		return 0;
-
 	if (stat & (ATA_ERR | ATA_DF))
 		qc->err_mask |= AC_ERR_DEV;
 	else
@@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)

 #ifdef CONFIG_ATA_VERBOSE_ERROR
 	if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
-			    ATA_SENSE | ATA_ERR)) {
+			    ATA_ERR)) {
 		if (res->command & ATA_BUSY)
 			ata_dev_err(qc->dev, "status: { Busy }\n");
 		else
-			ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
+			ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
 				res->command & ATA_DRDY ? "DRDY " : "",
 				res->command & ATA_DF ? "DF " : "",
 				res->command & ATA_DRQ ? "DRQ " : "",
-				res->command & ATA_SENSE ? "SENSE " : "",
 				res->command & ATA_ERR ? "ERR " : "");
 	}

diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 641a61a59e89..0d7f0da3a269 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
 	    ata_scsi_park_show, ata_scsi_park_store);
 EXPORT_SYMBOL_GPL(dev_attr_unload_heads);

-void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
 {
-	if (!cmd)
-		return;
-
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

 	scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
 }

-void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
-				    const struct ata_taskfile *tf)
-{
-	u64 information;
-
-	if (!cmd)
-		return;
-
-	information = ata_tf_read_block(tf, NULL);
-	scsi_set_sense_information(cmd->sense_buffer, information);
-}
-
 static ssize_t
 ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
 			  const char *buf, size_t count)
@@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 		    ((cdb[2] & 0x20) || need_sense)) {
 			ata_gen_passthru_sense(qc);
 		} else {
-			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
-				cmd->result = SAM_STAT_CHECK_CONDITION;
-			} else if (!need_sense) {
+			if (!need_sense) {
 				cmd->result = SAM_STAT_GOOD;
 			} else {
 				/* TODO: decide which descriptor format to use
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index a998a175f9f1..f840ca18a7c0 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
67extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, 67extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
68 u64 block, u32 n_block, unsigned int tf_flags, 68 u64 block, u32 n_block, unsigned int tf_flags,
69 unsigned int tag); 69 unsigned int tag);
70extern u64 ata_tf_read_block(const struct ata_taskfile *tf, 70extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
71 struct ata_device *dev);
72extern unsigned ata_exec_internal(struct ata_device *dev, 71extern unsigned ata_exec_internal(struct ata_device *dev,
73 struct ata_taskfile *tf, const u8 *cdb, 72 struct ata_taskfile *tf, const u8 *cdb,
74 int dma_dir, void *buf, unsigned int buflen, 73 int dma_dir, void *buf, unsigned int buflen,
@@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
138 struct scsi_host_template *sht); 137 struct scsi_host_template *sht);
139extern void ata_scsi_scan_host(struct ata_port *ap, int sync); 138extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
140extern int ata_scsi_offline_dev(struct ata_device *dev); 139extern int ata_scsi_offline_dev(struct ata_device *dev);
141extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
142extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
143 const struct ata_taskfile *tf);
144extern void ata_scsi_media_change_notify(struct ata_device *dev); 140extern void ata_scsi_media_change_notify(struct ata_device *dev);
145extern void ata_scsi_hotplug(struct work_struct *work); 141extern void ata_scsi_hotplug(struct work_struct *work);
146extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); 142extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 3a18a8a719b4..fab504fd9cfd 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1238,8 +1238,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1238 readl(mmio + PDC_SDRAM_CONTROL); 1238 readl(mmio + PDC_SDRAM_CONTROL);
1239 1239
1240 /* Turn on for ECC */ 1240 /* Turn on for ECC */
1241 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 1241 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1242 PDC_DIMM_SPD_TYPE, &spd0); 1242 PDC_DIMM_SPD_TYPE, &spd0)) {
1243 pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1244 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1245 return 1;
1246 }
1243 if (spd0 == 0x02) { 1247 if (spd0 == 0x02) {
1244 data |= (0x01 << 16); 1248 data |= (0x01 << 16);
1245 writel(data, mmio + PDC_SDRAM_CONTROL); 1249 writel(data, mmio + PDC_SDRAM_CONTROL);
@@ -1380,8 +1384,12 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
1380 1384
1381 /* ECC initialization. */ 1385 /* ECC initialization. */
1382 1386
1383 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 1387 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1384 PDC_DIMM_SPD_TYPE, &spd0); 1388 PDC_DIMM_SPD_TYPE, &spd0)) {
1389 pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1390 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1391 return 1;
1392 }
1385 if (spd0 == 0x02) { 1393 if (spd0 == 0x02) {
1386 void *buf; 1394 void *buf;
1387 VPRINTK("Start ECC initialization\n"); 1395 VPRINTK("Start ECC initialization\n");
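Both sata_sx4 hunks fix the same bug class: the SPD byte was consumed even when pdc20621_i2c_read() had failed and left it uninitialized. A compact sketch of the corrected pattern, with i2c_read_byte() as a hypothetical reader that follows the same nonzero-means-success convention:

#include <stdio.h>

static int i2c_read_byte(unsigned dev, unsigned sub, unsigned *out)
{
        (void)dev; (void)sub;
        *out = 0x02;            /* pretend the device answered */
        return 1;               /* nonzero means success, as in sx4 */
}

static int prog_dimm(void)
{
        unsigned spd0;

        if (!i2c_read_byte(0x50, 2, &spd0)) {
                fprintf(stderr, "i2c read failed\n");
                return 0;       /* never consume an uninitialized spd0 */
        }
        return spd0 == 0x02;    /* e.g. enable ECC only when reported */
}

int main(void)
{
        return !prog_dimm();
}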
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 81751a49d8bf..56486d92c4e7 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
296 if (!blk) 296 if (!blk)
297 return -ENOMEM; 297 return -ENOMEM;
298 298
299 present = krealloc(rbnode->cache_present, 299 if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
300 BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL); 300 present = krealloc(rbnode->cache_present,
301 if (!present) { 301 BITS_TO_LONGS(blklen) * sizeof(*present),
302 kfree(blk); 302 GFP_KERNEL);
303 return -ENOMEM; 303 if (!present) {
304 kfree(blk);
305 return -ENOMEM;
306 }
307
308 memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
309 (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
310 * sizeof(*present));
311 } else {
312 present = rbnode->cache_present;
304 } 313 }
305 314
306 /* insert the register value in the correct place in the rbnode block */ 315 /* insert the register value in the correct place in the rbnode block */
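The regcache-rbtree change only reallocates the presence bitmap when the block actually gains longs, and zeroes just the newly appended tail (krealloc, like realloc, leaves the extension uninitialized). A minimal userspace model of that logic; grow_present() is illustrative, not the driver code:

#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static long *grow_present(long *present, unsigned old_bits,
                          unsigned new_bits)
{
        size_t oldl = BITS_TO_LONGS(old_bits);
        size_t newl = BITS_TO_LONGS(new_bits);
        long *p;

        if (newl <= oldl)
                return present;         /* still fits, keep as-is */

        p = realloc(present, newl * sizeof(*p));
        if (!p)
                return NULL;            /* caller keeps the old buffer */

        /* zero only the longs that were just added */
        memset(p + oldl, 0, (newl - oldl) * sizeof(*p));
        return p;
}

int main(void)
{
        long *p = grow_present(NULL, 0, 128);   /* realloc(NULL,..) mallocs */
        free(p);
        return 0;
}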
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index ced96777b677..954c0029fb3b 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -369,8 +369,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
369 return; 369 return;
370 } 370 }
371 371
372 if (work_pending(&blkif->persistent_purge_work)) { 372 if (work_busy(&blkif->persistent_purge_work)) {
373 pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n"); 373 pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
374 return; 374 return;
375 } 375 }
376 376
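The xen-blkback fix rests on the difference between the two predicates: work_pending() only sees a work item that is queued but not yet running, while work_busy() also reports one that is currently executing, which is exactly the window the purge path must avoid. A toy model of the distinction (the real predicates test work_struct state bits, so this is a deliberate simplification):

#include <stdbool.h>
#include <stdio.h>

struct work { bool queued; bool running; };

static bool work_pending_model(const struct work *w)
{
        return w->queued;               /* queued, not yet started */
}

static bool work_busy_model(const struct work *w)
{
        return w->queued || w->running; /* also catches in-flight work */
}

int main(void)
{
        struct work w = { .queued = false, .running = true };

        /* the purge path must also back off while the work runs */
        printf("pending=%d busy=%d\n",
               work_pending_model(&w), work_busy_model(&w));
        return 0;
}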
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6d89ed35d80c..7a8a73f1fc04 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -179,6 +179,7 @@ static DEFINE_SPINLOCK(minor_lock);
179 ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) 179 ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
180 180
181static int blkfront_setup_indirect(struct blkfront_info *info); 181static int blkfront_setup_indirect(struct blkfront_info *info);
182static int blkfront_gather_backend_features(struct blkfront_info *info);
182 183
183static int get_id_from_freelist(struct blkfront_info *info) 184static int get_id_from_freelist(struct blkfront_info *info)
184{ 185{
@@ -1128,8 +1129,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
1128 * Add the used indirect page back to the list of 1129 * Add the used indirect page back to the list of
1129 * available pages for indirect grefs. 1130 * available pages for indirect grefs.
1130 */ 1131 */
1131 indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); 1132 if (!info->feature_persistent) {
1132 list_add(&indirect_page->lru, &info->indirect_pages); 1133 indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
1134 list_add(&indirect_page->lru, &info->indirect_pages);
1135 }
1133 s->indirect_grants[i]->gref = GRANT_INVALID_REF; 1136 s->indirect_grants[i]->gref = GRANT_INVALID_REF;
1134 list_add_tail(&s->indirect_grants[i]->node, &info->grants); 1137 list_add_tail(&s->indirect_grants[i]->node, &info->grants);
1135 } 1138 }
@@ -1519,7 +1522,7 @@ static int blkif_recover(struct blkfront_info *info)
1519 info->shadow_free = info->ring.req_prod_pvt; 1522 info->shadow_free = info->ring.req_prod_pvt;
1520 info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; 1523 info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
1521 1524
1522 rc = blkfront_setup_indirect(info); 1525 rc = blkfront_gather_backend_features(info);
1523 if (rc) { 1526 if (rc) {
1524 kfree(copy); 1527 kfree(copy);
1525 return rc; 1528 return rc;
@@ -1720,20 +1723,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
1720 1723
1721static int blkfront_setup_indirect(struct blkfront_info *info) 1724static int blkfront_setup_indirect(struct blkfront_info *info)
1722{ 1725{
1723 unsigned int indirect_segments, segs; 1726 unsigned int segs;
1724 int err, i; 1727 int err, i;
1725 1728
1726 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 1729 if (info->max_indirect_segments == 0)
1727 "feature-max-indirect-segments", "%u", &indirect_segments,
1728 NULL);
1729 if (err) {
1730 info->max_indirect_segments = 0;
1731 segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; 1730 segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
1732 } else { 1731 else
1733 info->max_indirect_segments = min(indirect_segments,
1734 xen_blkif_max_segments);
1735 segs = info->max_indirect_segments; 1732 segs = info->max_indirect_segments;
1736 }
1737 1733
1738 err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info)); 1734 err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
1739 if (err) 1735 if (err)
@@ -1797,6 +1793,68 @@ out_of_memory:
1797} 1793}
1798 1794
1799/* 1795/*
1796 * Gather all backend feature-*
1797 */
1798static int blkfront_gather_backend_features(struct blkfront_info *info)
1799{
1800 int err;
1801 int barrier, flush, discard, persistent;
1802 unsigned int indirect_segments;
1803
1804 info->feature_flush = 0;
1805
1806 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1807 "feature-barrier", "%d", &barrier,
1808 NULL);
1809
1810 /*
1811 * If there's no "feature-barrier" defined, then it means
1812 * we're dealing with a very old backend which writes
1813 * synchronously; nothing to do.
1814 *
1815 * If there are barriers, then we use flush.
1816 */
1817 if (!err && barrier)
1818 info->feature_flush = REQ_FLUSH | REQ_FUA;
1819 /*
1820 * And if there is "feature-flush-cache" use that above
1821 * barriers.
1822 */
1823 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1824 "feature-flush-cache", "%d", &flush,
1825 NULL);
1826
1827 if (!err && flush)
1828 info->feature_flush = REQ_FLUSH;
1829
1830 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1831 "feature-discard", "%d", &discard,
1832 NULL);
1833
1834 if (!err && discard)
1835 blkfront_setup_discard(info);
1836
1837 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1838 "feature-persistent", "%u", &persistent,
1839 NULL);
1840 if (err)
1841 info->feature_persistent = 0;
1842 else
1843 info->feature_persistent = persistent;
1844
1845 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1846 "feature-max-indirect-segments", "%u", &indirect_segments,
1847 NULL);
1848 if (err)
1849 info->max_indirect_segments = 0;
1850 else
1851 info->max_indirect_segments = min(indirect_segments,
1852 xen_blkif_max_segments);
1853
1854 return blkfront_setup_indirect(info);
1855}
1856
1857/*
1800 * Invoked when the backend is finally 'ready' (and has told produced 1858 * Invoked when the backend is finally 'ready' (and has told produced
1801 * the details about the physical device - #sectors, size, etc). 1859 * the details about the physical device - #sectors, size, etc).
1802 */ 1860 */
@@ -1807,7 +1865,6 @@ static void blkfront_connect(struct blkfront_info *info)
1807 unsigned int physical_sector_size; 1865 unsigned int physical_sector_size;
1808 unsigned int binfo; 1866 unsigned int binfo;
1809 int err; 1867 int err;
1810 int barrier, flush, discard, persistent;
1811 1868
1812 switch (info->connected) { 1869 switch (info->connected) {
1813 case BLKIF_STATE_CONNECTED: 1870 case BLKIF_STATE_CONNECTED:
@@ -1864,48 +1921,7 @@ static void blkfront_connect(struct blkfront_info *info)
1864 if (err != 1) 1921 if (err != 1)
1865 physical_sector_size = sector_size; 1922 physical_sector_size = sector_size;
1866 1923
1867 info->feature_flush = 0; 1924 err = blkfront_gather_backend_features(info);
1868
1869 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1870 "feature-barrier", "%d", &barrier,
1871 NULL);
1872
1873 /*
1874 * If there's no "feature-barrier" defined, then it means
1875 * we're dealing with a very old backend which writes
1876 * synchronously; nothing to do.
1877 *
1878 * If there are barriers, then we use flush.
1879 */
1880 if (!err && barrier)
1881 info->feature_flush = REQ_FLUSH | REQ_FUA;
1882 /*
1883 * And if there is "feature-flush-cache" use that above
1884 * barriers.
1885 */
1886 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1887 "feature-flush-cache", "%d", &flush,
1888 NULL);
1889
1890 if (!err && flush)
1891 info->feature_flush = REQ_FLUSH;
1892
1893 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1894 "feature-discard", "%d", &discard,
1895 NULL);
1896
1897 if (!err && discard)
1898 blkfront_setup_discard(info);
1899
1900 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1901 "feature-persistent", "%u", &persistent,
1902 NULL);
1903 if (err)
1904 info->feature_persistent = 0;
1905 else
1906 info->feature_persistent = persistent;
1907
1908 err = blkfront_setup_indirect(info);
1909 if (err) { 1925 if (err) {
1910 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", 1926 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
1911 info->xbdev->otherend); 1927 info->xbdev->otherend);
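The net effect of this xen-blkfront patch is that connect and recovery share one feature probe. A hedged sketch of the same shape in plain C, with xenstore modeled as a flat key/value table; gather(), kv[] and max_segments_cap are illustrative stand-ins for xenbus_gather(), the backend directory and the xen_blkif_max_segments module parameter:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *kv[][2] = {
        { "feature-barrier", "1" },
        { "feature-max-indirect-segments", "256" },
};

static int gather(const char *key, unsigned *out)
{
        for (size_t i = 0; i < sizeof(kv) / sizeof(kv[0]); i++)
                if (!strcmp(kv[i][0], key)) {
                        *out = (unsigned)strtoul(kv[i][1], NULL, 10);
                        return 0;       /* found */
                }
        return -1;                      /* treat as feature absent */
}

int main(void)
{
        unsigned max_segments_cap = 32; /* module-parameter stand-in */
        unsigned val, max_indirect;

        unsigned flush = (gather("feature-barrier", &val) == 0 && val);
        max_indirect = gather("feature-max-indirect-segments", &val) ? 0
                        : (val < max_segments_cap ? val : max_segments_cap);

        printf("flush=%u max_indirect=%u\n", flush, max_indirect);
        return 0;
}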
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fb655e8d1e3b..763301c7828c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -496,10 +496,9 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
496 kfree(meta); 496 kfree(meta);
497} 497}
498 498
499static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize) 499static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
500{ 500{
501 size_t num_pages; 501 size_t num_pages;
502 char pool_name[8];
503 struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL); 502 struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
504 503
505 if (!meta) 504 if (!meta)
@@ -512,7 +511,6 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
512 goto out_error; 511 goto out_error;
513 } 512 }
514 513
515 snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
516 meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM); 514 meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
517 if (!meta->mem_pool) { 515 if (!meta->mem_pool) {
518 pr_err("Error creating memory pool\n"); 516 pr_err("Error creating memory pool\n");
@@ -1031,7 +1029,7 @@ static ssize_t disksize_store(struct device *dev,
1031 return -EINVAL; 1029 return -EINVAL;
1032 1030
1033 disksize = PAGE_ALIGN(disksize); 1031 disksize = PAGE_ALIGN(disksize);
1034 meta = zram_meta_alloc(zram->disk->first_minor, disksize); 1032 meta = zram_meta_alloc(zram->disk->disk_name, disksize);
1035 if (!meta) 1033 if (!meta)
1036 return -ENOMEM; 1034 return -ENOMEM;
1037 1035
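The zram change passes the gendisk name straight through instead of re-formatting "zram%d" into a char[8], which silently truncates once the device id needs four digits and can make two pools share a name. The snprintf() return value makes the hazard visible:

#include <stdio.h>

int main(void)
{
        char pool_name[8];
        int n = snprintf(pool_name, sizeof(pool_name), "zram%d", 1000);

        if (n >= (int)sizeof(pool_name))
                printf("truncated: wanted %d bytes, got \"%s\"\n",
                       n + 1, pool_name);
        return 0;
}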
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 4b93a1efb36d..ac03ba49e9d1 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
126PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" }; 126PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
127PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" }; 127PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
128 128
129#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB) 129#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
130#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \ 130#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
131 div_hp, bit, is_lp, flags) \ 131 div_hp, bit, is_lp, flags) \
132 PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \ 132 PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \
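The clk-pxa3xx one-liner inverts a bank-select ternary: clock-enable bits 0-31 belong to register A and bits 32 and up to register B, and the macro had them swapped. A compact model with cken_reg() as an illustrative helper:

#include <assert.h>
#include <stdint.h>

static uint32_t cken_a, cken_b;

static uint32_t *cken_reg(unsigned bit)
{
        return bit > 31 ? &cken_b : &cken_a;    /* corrected mapping */
}

int main(void)
{
        *cken_reg(3)  |= 1u << 3;
        *cken_reg(35) |= 1u << (35 - 32);

        assert(cken_a == (1u << 3));
        assert(cken_b == (1u << 3));
        return 0;
}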
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index b8ff3c64cc45..c96de14036a0 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -661,6 +661,9 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
661{ 661{
662 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); 662 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
663 663
664 if (!ch->cs_enabled)
665 return;
666
664 sh_cmt_stop(ch, FLAG_CLOCKSOURCE); 667 sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
665 pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev); 668 pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
666} 669}
@@ -669,6 +672,9 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
669{ 672{
670 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); 673 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
671 674
675 if (!ch->cs_enabled)
676 return;
677
672 pm_genpd_syscore_poweron(&ch->cmt->pdev->dev); 678 pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
673 sh_cmt_start(ch, FLAG_CLOCKSOURCE); 679 sh_cmt_start(ch, FLAG_CLOCKSOURCE);
674} 680}
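The sh_cmt hunks make syscore suspend/resume no-ops for a channel that was never enabled, keeping the stop/start and power-domain refcounts balanced. A stripped-down model (ch, ch_suspend() and ch_resume() are illustrative, with a plain counter standing in for pm_genpd_syscore_poweroff/poweron):

#include <stdbool.h>
#include <stdio.h>

struct ch { bool enabled; int power_refs; };

static void ch_suspend(struct ch *c)
{
        if (!c->enabled)
                return;         /* nothing was started; don't power off */
        c->power_refs--;
}

static void ch_resume(struct ch *c)
{
        if (!c->enabled)
                return;
        c->power_refs++;
}

int main(void)
{
        struct ch idle = { .enabled = false, .power_refs = 0 };

        ch_suspend(&idle);      /* no-op now, refcount stays balanced */
        ch_resume(&idle);
        printf("refs=%d\n", idle.power_refs);
        return 0;
}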
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 2d59038dec43..86c7eb66bdfb 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -462,6 +462,7 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
462 BUG_ON(!imxtm->base); 462 BUG_ON(!imxtm->base);
463 463
464 imxtm->type = type; 464 imxtm->type = type;
465 imxtm->irq = irq;
465 466
466 _mxc_timer_init(imxtm); 467 _mxc_timer_init(imxtm);
467} 468}
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index ae5b2bd3a978..fa3dd840a837 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -180,7 +180,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
180 ret = exynos5250_cpufreq_init(exynos_info); 180 ret = exynos5250_cpufreq_init(exynos_info);
181 } else { 181 } else {
182 pr_err("%s: Unknown SoC type\n", __func__); 182 pr_err("%s: Unknown SoC type\n", __func__);
183 return -ENODEV; 183 ret = -ENODEV;
184 } 184 }
185 185
186 if (ret) 186 if (ret)
@@ -188,12 +188,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
188 188
189 if (exynos_info->set_freq == NULL) { 189 if (exynos_info->set_freq == NULL) {
190 dev_err(&pdev->dev, "No set_freq function (ERR)\n"); 190 dev_err(&pdev->dev, "No set_freq function (ERR)\n");
191 ret = -EINVAL;
191 goto err_vdd_arm; 192 goto err_vdd_arm;
192 } 193 }
193 194
194 arm_regulator = regulator_get(NULL, "vdd_arm"); 195 arm_regulator = regulator_get(NULL, "vdd_arm");
195 if (IS_ERR(arm_regulator)) { 196 if (IS_ERR(arm_regulator)) {
196 dev_err(&pdev->dev, "failed to get resource vdd_arm\n"); 197 dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
198 ret = -EINVAL;
197 goto err_vdd_arm; 199 goto err_vdd_arm;
198 } 200 }
199 201
@@ -225,7 +227,7 @@ err_cpufreq_reg:
225 regulator_put(arm_regulator); 227 regulator_put(arm_regulator);
226err_vdd_arm: 228err_vdd_arm:
227 kfree(exynos_info); 229 kfree(exynos_info);
228 return -EINVAL; 230 return ret;
229} 231}
230 232
231static struct platform_driver exynos_cpufreq_platdrv = { 233static struct platform_driver exynos_cpufreq_platdrv = {
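The exynos-cpufreq hunks are the standard single-exit-label idiom: each failure site records its own errno in ret and the common label returns whatever was set, instead of the old hardcoded -EINVAL (the unknown-SoC path also used to return directly and skip the common cleanup). A sketch of the shape; probe() and its arguments are illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int probe(int soc_known, int have_regulator)
{
        int ret = 0;
        char *info = malloc(64);        /* stands in for exynos_info */

        if (!info)
                return -ENOMEM;

        if (!soc_known) {
                ret = -ENODEV;          /* specific errno, common exit */
                goto err_free;
        }
        if (!have_regulator) {
                ret = -EINVAL;
                goto err_free;
        }
        free(info);
        return 0;

err_free:
        free(info);                     /* one cleanup path for all errors */
        return ret;
}

int main(void)
{
        printf("%d %d\n", probe(0, 1), probe(1, 0));
        return 0;
}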
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index dae1e8099969..f9c78751989e 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
909 state->buflen_1; 909 state->buflen_1;
910 u32 *sh_desc = ctx->sh_desc_fin, *desc; 910 u32 *sh_desc = ctx->sh_desc_fin, *desc;
911 dma_addr_t ptr = ctx->sh_desc_fin_dma; 911 dma_addr_t ptr = ctx->sh_desc_fin_dma;
912 int sec4_sg_bytes; 912 int sec4_sg_bytes, sec4_sg_src_index;
913 int digestsize = crypto_ahash_digestsize(ahash); 913 int digestsize = crypto_ahash_digestsize(ahash);
914 struct ahash_edesc *edesc; 914 struct ahash_edesc *edesc;
915 int ret = 0; 915 int ret = 0;
916 int sh_len; 916 int sh_len;
917 917
918 sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry); 918 sec4_sg_src_index = 1 + (buflen ? 1 : 0);
919 sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
919 920
920 /* allocate space for base edesc and hw desc commands, link tables */ 921 /* allocate space for base edesc and hw desc commands, link tables */
921 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + 922 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
942 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, 943 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
943 buf, state->buf_dma, buflen, 944 buf, state->buf_dma, buflen,
944 last_buflen); 945 last_buflen);
945 (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN; 946 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
946 947
947 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 948 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
948 sec4_sg_bytes, DMA_TO_DEVICE); 949 sec4_sg_bytes, DMA_TO_DEVICE);
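The caamhash fix separates two quantities the old code conflated: sec4_sg_src_index counts S/G entries while sec4_sg_bytes measures the bytes they occupy, and the FIN flag must land on the last *entry*, so indexing the table with the byte count walks far past it. Minimal illustration (sg_entry and SG_LEN_FIN are simplified stand-ins):

#include <assert.h>
#include <string.h>

struct sg_entry { unsigned len; };
#define SG_LEN_FIN 0x40000000u

int main(void)
{
        struct sg_entry sg[4];
        unsigned src_index = 2;                         /* entries */
        unsigned sg_bytes = src_index * sizeof(sg[0]);  /* bytes */

        memset(sg, 0, sizeof(sg));
        /* correct: index by entries, not by bytes */
        sg[src_index - 1].len |= SG_LEN_FIN;

        assert(sg[1].len & SG_LEN_FIN);
        (void)sg_bytes;
        return 0;
}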
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 08f8d5cd6334..becb738c897b 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -71,7 +71,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
71 struct sha256_state *sctx = shash_desc_ctx(desc); 71 struct sha256_state *sctx = shash_desc_ctx(desc);
72 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 72 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
73 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 73 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
74 struct nx_sg *in_sg;
75 struct nx_sg *out_sg; 74 struct nx_sg *out_sg;
76 u64 to_process = 0, leftover, total; 75 u64 to_process = 0, leftover, total;
77 unsigned long irq_flags; 76 unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
97 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 96 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
98 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; 97 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
99 98
100 in_sg = nx_ctx->in_sg;
101 max_sg_len = min_t(u64, nx_ctx->ap->sglen, 99 max_sg_len = min_t(u64, nx_ctx->ap->sglen,
102 nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 100 nx_driver.of.max_sg_len/sizeof(struct nx_sg));
103 max_sg_len = min_t(u64, max_sg_len, 101 max_sg_len = min_t(u64, max_sg_len,
@@ -114,17 +112,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
114 } 112 }
115 113
116 do { 114 do {
117 /* 115 int used_sgs = 0;
118 * to_process: the SHA256_BLOCK_SIZE data chunk to process in 116 struct nx_sg *in_sg = nx_ctx->in_sg;
119 * this update. This value is also restricted by the sg list
120 * limits.
121 */
122 to_process = total - to_process;
123 to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
124 117
125 if (buf_len) { 118 if (buf_len) {
126 data_len = buf_len; 119 data_len = buf_len;
127 in_sg = nx_build_sg_list(nx_ctx->in_sg, 120 in_sg = nx_build_sg_list(in_sg,
128 (u8 *) sctx->buf, 121 (u8 *) sctx->buf,
129 &data_len, 122 &data_len,
130 max_sg_len); 123 max_sg_len);
@@ -133,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
133 rc = -EINVAL; 126 rc = -EINVAL;
134 goto out; 127 goto out;
135 } 128 }
129 used_sgs = in_sg - nx_ctx->in_sg;
136 } 130 }
137 131
132 /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
133 * processed in this iteration. This value is restricted
134 * by sg list limits and number of sgs we already used
135 * for leftover data. (see above)
136 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
137 * but because data may not be aligned, we need to account
138 * for that too. */
139 to_process = min_t(u64, total,
140 (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
141 to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
142
138 data_len = to_process - buf_len; 143 data_len = to_process - buf_len;
139 in_sg = nx_build_sg_list(in_sg, (u8 *) data, 144 in_sg = nx_build_sg_list(in_sg, (u8 *) data,
140 &data_len, max_sg_len); 145 &data_len, max_sg_len);
141 146
142 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 147 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
143 148
144 to_process = (data_len + buf_len); 149 to_process = data_len + buf_len;
145 leftover = total - to_process; 150 leftover = total - to_process;
146 151
147 /* 152 /*
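Both nx-sha hunks re-derive to_process inside the loop the same way: take at most what the remaining scatter/gather slots can describe, keep one slot in reserve for misaligned data, then round down to whole hash blocks. A hedged model of that computation (PAGE_SZ and BLOCK_SZ stand in for NX_PAGE_SIZE and SHA256_BLOCK_SIZE):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ   4096u
#define BLOCK_SZ  64u           /* SHA-256 block size */

static uint64_t chunk(uint64_t total, unsigned max_sg, unsigned used_sgs)
{
        /* one slot held back because the data may not be page aligned */
        uint64_t cap = (uint64_t)(max_sg - 1 - used_sgs) * PAGE_SZ;
        uint64_t n = total < cap ? total : cap;

        return n & ~(uint64_t)(BLOCK_SZ - 1);   /* block-align down */
}

int main(void)
{
        /* plenty of data, 32 slots, 1 already used for buffered bytes */
        printf("%llu\n", (unsigned long long)chunk(1 << 20, 32, 1));
        return 0;
}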
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index aff0fe58eac0..b6e183d58d73 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -71,7 +71,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
71 struct sha512_state *sctx = shash_desc_ctx(desc); 71 struct sha512_state *sctx = shash_desc_ctx(desc);
72 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 72 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
73 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 73 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
74 struct nx_sg *in_sg;
75 struct nx_sg *out_sg; 74 struct nx_sg *out_sg;
76 u64 to_process, leftover = 0, total; 75 u64 to_process, leftover = 0, total;
77 unsigned long irq_flags; 76 unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
97 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 96 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
98 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; 97 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
99 98
100 in_sg = nx_ctx->in_sg;
101 max_sg_len = min_t(u64, nx_ctx->ap->sglen, 99 max_sg_len = min_t(u64, nx_ctx->ap->sglen,
102 nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 100 nx_driver.of.max_sg_len/sizeof(struct nx_sg));
103 max_sg_len = min_t(u64, max_sg_len, 101 max_sg_len = min_t(u64, max_sg_len,
@@ -114,18 +112,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
114 } 112 }
115 113
116 do { 114 do {
117 /* 115 int used_sgs = 0;
118 * to_process: the SHA512_BLOCK_SIZE data chunk to process in 116 struct nx_sg *in_sg = nx_ctx->in_sg;
119 * this update. This value is also restricted by the sg list
120 * limits.
121 */
122 to_process = total - leftover;
123 to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
124 leftover = total - to_process;
125 117
126 if (buf_len) { 118 if (buf_len) {
127 data_len = buf_len; 119 data_len = buf_len;
128 in_sg = nx_build_sg_list(nx_ctx->in_sg, 120 in_sg = nx_build_sg_list(in_sg,
129 (u8 *) sctx->buf, 121 (u8 *) sctx->buf,
130 &data_len, max_sg_len); 122 &data_len, max_sg_len);
131 123
@@ -133,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
133 rc = -EINVAL; 125 rc = -EINVAL;
134 goto out; 126 goto out;
135 } 127 }
128 used_sgs = in_sg - nx_ctx->in_sg;
136 } 129 }
137 130
131 /* to_process: SHA512_BLOCK_SIZE aligned chunk to be
132 * processed in this iteration. This value is restricted
133 * by sg list limits and number of sgs we already used
134 * for leftover data. (see above)
135 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
136 * but because data may not be aligned, we need to account
137 * for that too. */
138 to_process = min_t(u64, total,
139 (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
140 to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
141
138 data_len = to_process - buf_len; 142 data_len = to_process - buf_len;
139 in_sg = nx_build_sg_list(in_sg, (u8 *) data, 143 in_sg = nx_build_sg_list(in_sg, (u8 *) data,
140 &data_len, max_sg_len); 144 &data_len, max_sg_len);
@@ -146,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
146 goto out; 150 goto out;
147 } 151 }
148 152
149 to_process = (data_len + buf_len); 153 to_process = data_len + buf_len;
150 leftover = total - to_process; 154 leftover = total - to_process;
151 155
152 /* 156 /*
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 4a4cce15f25d..3ff284c8e3d5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -689,6 +689,10 @@ struct dma_chan *dma_request_slave_channel(struct device *dev,
689 struct dma_chan *ch = dma_request_slave_channel_reason(dev, name); 689 struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
690 if (IS_ERR(ch)) 690 if (IS_ERR(ch))
691 return NULL; 691 return NULL;
692
693 dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
694 ch->device->privatecnt++;
695
692 return ch; 696 return ch;
693} 697}
694EXPORT_SYMBOL_GPL(dma_request_slave_channel); 698EXPORT_SYMBOL_GPL(dma_request_slave_channel);
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 3515b381c131..711d8ad74f11 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
920 */ 920 */
921 921
922 for (row = 0; row < mci->nr_csrows; row++) { 922 for (row = 0; row < mci->nr_csrows; row++) {
923 struct csrow_info *csi = &mci->csrows[row]; 923 struct csrow_info *csi = mci->csrows[row];
924 924
925 /* 925 /*
926 * Get the configuration settings for this 926 * Get the configuration settings for this
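The ppc4xx_edac one-liner tracks a core EDAC type change: mci->csrows is an array of pointers, so element i is already a struct csrow_info * and taking its address yields a pointer-to-pointer. A minimal illustration of the distinction:

#include <stdio.h>

struct csrow { int nr_channels; };

int main(void)
{
        struct csrow row0 = { 2 };
        struct csrow *csrows[1] = { &row0 };

        struct csrow *csi = csrows[0];          /* correct */
        /* struct csrow **wrong = &csrows[0];      the old pattern */

        printf("%d\n", csi->nr_channels);
        return 0;
}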
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index 87add3fdce52..e41594510b97 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -245,4 +245,4 @@ char *bcm47xx_nvram_get_contents(size_t *nvram_size)
245} 245}
246EXPORT_SYMBOL(bcm47xx_nvram_get_contents); 246EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
247 247
248MODULE_LICENSE("GPLv2"); 248MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c46ca311d8c3..1a0a8df2eed8 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -37,9 +37,29 @@ config DRM_KMS_FB_HELPER
37 select FB 37 select FB
38 select FRAMEBUFFER_CONSOLE if !EXPERT 38 select FRAMEBUFFER_CONSOLE if !EXPERT
39 select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE 39 select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
40 select FB_SYS_FOPS
41 select FB_SYS_FILLRECT
42 select FB_SYS_COPYAREA
43 select FB_SYS_IMAGEBLIT
44 select FB_CFB_FILLRECT
45 select FB_CFB_COPYAREA
46 select FB_CFB_IMAGEBLIT
40 help 47 help
41 FBDEV helpers for KMS drivers. 48 FBDEV helpers for KMS drivers.
42 49
50config DRM_FBDEV_EMULATION
51 bool "Enable legacy fbdev support for your modesetting driver"
52 depends on DRM
53 select DRM_KMS_HELPER
54 select DRM_KMS_FB_HELPER
55 default y
56 help
57 Choose this option if you have a need for the legacy fbdev
58 support. Note that this support also provides the linux console
59 support on top of your modesetting driver.
60
61 If in doubt, say "Y".
62
43config DRM_LOAD_EDID_FIRMWARE 63config DRM_LOAD_EDID_FIRMWARE
44 bool "Allow to specify an EDID data set instead of probing for it" 64 bool "Allow to specify an EDID data set instead of probing for it"
45 depends on DRM_KMS_HELPER 65 depends on DRM_KMS_HELPER
@@ -79,8 +99,6 @@ config DRM_KMS_CMA_HELPER
79 99
80source "drivers/gpu/drm/i2c/Kconfig" 100source "drivers/gpu/drm/i2c/Kconfig"
81 101
82source "drivers/gpu/drm/bridge/Kconfig"
83
84config DRM_TDFX 102config DRM_TDFX
85 tristate "3dfx Banshee/Voodoo3+" 103 tristate "3dfx Banshee/Voodoo3+"
86 depends on DRM && PCI 104 depends on DRM && PCI
@@ -110,6 +128,7 @@ config DRM_RADEON
110 select POWER_SUPPLY 128 select POWER_SUPPLY
111 select HWMON 129 select HWMON
112 select BACKLIGHT_CLASS_DEVICE 130 select BACKLIGHT_CLASS_DEVICE
131 select BACKLIGHT_LCD_SUPPORT
113 select INTERVAL_TREE 132 select INTERVAL_TREE
114 help 133 help
115 Choose this option if you have an ATI Radeon graphics card. There 134 Choose this option if you have an ATI Radeon graphics card. There
@@ -133,6 +152,7 @@ config DRM_AMDGPU
133 select POWER_SUPPLY 152 select POWER_SUPPLY
134 select HWMON 153 select HWMON
135 select BACKLIGHT_CLASS_DEVICE 154 select BACKLIGHT_CLASS_DEVICE
155 select BACKLIGHT_LCD_SUPPORT
136 select INTERVAL_TREE 156 select INTERVAL_TREE
137 help 157 help
138 Choose this option if you have a recent AMD Radeon graphics card. 158 Choose this option if you have a recent AMD Radeon graphics card.
@@ -231,10 +251,14 @@ source "drivers/gpu/drm/virtio/Kconfig"
231 251
232source "drivers/gpu/drm/msm/Kconfig" 252source "drivers/gpu/drm/msm/Kconfig"
233 253
254source "drivers/gpu/drm/fsl-dcu/Kconfig"
255
234source "drivers/gpu/drm/tegra/Kconfig" 256source "drivers/gpu/drm/tegra/Kconfig"
235 257
236source "drivers/gpu/drm/panel/Kconfig" 258source "drivers/gpu/drm/panel/Kconfig"
237 259
260source "drivers/gpu/drm/bridge/Kconfig"
261
238source "drivers/gpu/drm/sti/Kconfig" 262source "drivers/gpu/drm/sti/Kconfig"
239 263
240source "drivers/gpu/drm/amd/amdkfd/Kconfig" 264source "drivers/gpu/drm/amd/amdkfd/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 5713d0534504..45e7719846b1 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -23,7 +23,7 @@ drm-$(CONFIG_OF) += drm_of.o
23drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ 23drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
24 drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o 24 drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
25drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o 25drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
26drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o 26drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
27drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o 27drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
28 28
29obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o 29obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -70,3 +70,4 @@ obj-$(CONFIG_DRM_IMX) += imx/
70obj-y += i2c/ 70obj-y += i2c/
71obj-y += panel/ 71obj-y += panel/
72obj-y += bridge/ 72obj-y += bridge/
73obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 908360584e4d..04c270757030 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -3,7 +3,9 @@
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4 4
5ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \ 5ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \
6 -Idrivers/gpu/drm/amd/include 6 -Idrivers/gpu/drm/amd/include \
7 -Idrivers/gpu/drm/amd/amdgpu \
8 -Idrivers/gpu/drm/amd/scheduler
7 9
8amdgpu-y := amdgpu_drv.o 10amdgpu-y := amdgpu_drv.o
9 11
@@ -21,7 +23,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
21 23
22# add asic specific block 24# add asic specific block
23amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \ 25amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
24 ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o 26 ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
27 amdgpu_amdkfd_gfx_v7.o
25 28
26amdgpu-y += \ 29amdgpu-y += \
27 vi.o 30 vi.o
@@ -43,6 +46,7 @@ amdgpu-y += \
43 amdgpu_dpm.o \ 46 amdgpu_dpm.o \
44 cz_smc.o cz_dpm.o \ 47 cz_smc.o cz_dpm.o \
45 tonga_smc.o tonga_dpm.o \ 48 tonga_smc.o tonga_dpm.o \
49 fiji_smc.o fiji_dpm.o \
46 iceland_smc.o iceland_dpm.o 50 iceland_smc.o iceland_dpm.o
47 51
48# add DCE block 52# add DCE block
@@ -74,9 +78,17 @@ amdgpu-y += \
74# add amdkfd interfaces 78# add amdkfd interfaces
75amdgpu-y += \ 79amdgpu-y += \
76 amdgpu_amdkfd.o \ 80 amdgpu_amdkfd.o \
77 amdgpu_amdkfd_gfx_v7.o \
78 amdgpu_amdkfd_gfx_v8.o 81 amdgpu_amdkfd_gfx_v8.o
79 82
83# add cgs
84amdgpu-y += amdgpu_cgs.o
85
86# GPU scheduler
87amdgpu-y += \
88 ../scheduler/gpu_scheduler.o \
89 ../scheduler/sched_fence.o \
90 amdgpu_sched.o
91
80amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o 92amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
81amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o 93amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
82amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o 94amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index baefa635169a..2fc58e658986 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -42,17 +42,19 @@
42#include <ttm/ttm_module.h> 42#include <ttm/ttm_module.h>
43#include <ttm/ttm_execbuf_util.h> 43#include <ttm/ttm_execbuf_util.h>
44 44
45#include <drm/drmP.h>
45#include <drm/drm_gem.h> 46#include <drm/drm_gem.h>
46#include <drm/amdgpu_drm.h> 47#include <drm/amdgpu_drm.h>
47 48
48#include "amd_shared.h" 49#include "amd_shared.h"
49#include "amdgpu_family.h"
50#include "amdgpu_mode.h" 50#include "amdgpu_mode.h"
51#include "amdgpu_ih.h" 51#include "amdgpu_ih.h"
52#include "amdgpu_irq.h" 52#include "amdgpu_irq.h"
53#include "amdgpu_ucode.h" 53#include "amdgpu_ucode.h"
54#include "amdgpu_gds.h" 54#include "amdgpu_gds.h"
55 55
56#include "gpu_scheduler.h"
57
56/* 58/*
57 * Modules parameters. 59 * Modules parameters.
58 */ 60 */
@@ -77,7 +79,11 @@ extern int amdgpu_bapm;
77extern int amdgpu_deep_color; 79extern int amdgpu_deep_color;
78extern int amdgpu_vm_size; 80extern int amdgpu_vm_size;
79extern int amdgpu_vm_block_size; 81extern int amdgpu_vm_block_size;
82extern int amdgpu_enable_scheduler;
83extern int amdgpu_sched_jobs;
84extern int amdgpu_sched_hw_submission;
80 85
86#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
81#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 87#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
82#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2) 88#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
83/* AMDGPU_IB_POOL_SIZE must be a power of 2 */ 89/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
@@ -178,6 +184,7 @@ struct amdgpu_ring;
178struct amdgpu_semaphore; 184struct amdgpu_semaphore;
179struct amdgpu_cs_parser; 185struct amdgpu_cs_parser;
180struct amdgpu_irq_src; 186struct amdgpu_irq_src;
187struct amdgpu_fpriv;
181 188
182enum amdgpu_cp_irq { 189enum amdgpu_cp_irq {
183 AMDGPU_CP_IRQ_GFX_EOP = 0, 190 AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -381,10 +388,10 @@ struct amdgpu_fence_driver {
381 uint64_t sync_seq[AMDGPU_MAX_RINGS]; 388 uint64_t sync_seq[AMDGPU_MAX_RINGS];
382 atomic64_t last_seq; 389 atomic64_t last_seq;
383 bool initialized; 390 bool initialized;
384 bool delayed_irq;
385 struct amdgpu_irq_src *irq_src; 391 struct amdgpu_irq_src *irq_src;
386 unsigned irq_type; 392 unsigned irq_type;
387 struct delayed_work lockup_work; 393 struct delayed_work lockup_work;
394 wait_queue_head_t fence_queue;
388}; 395};
389 396
390/* some special values for the owner field */ 397/* some special values for the owner field */
@@ -423,20 +430,18 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
423int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, 430int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
424 struct amdgpu_irq_src *irq_src, 431 struct amdgpu_irq_src *irq_src,
425 unsigned irq_type); 432 unsigned irq_type);
433void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
434void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
426int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, 435int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
427 struct amdgpu_fence **fence); 436 struct amdgpu_fence **fence);
428int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
429 uint64_t seq, struct amdgpu_fence **fence);
430void amdgpu_fence_process(struct amdgpu_ring *ring); 437void amdgpu_fence_process(struct amdgpu_ring *ring);
431int amdgpu_fence_wait_next(struct amdgpu_ring *ring); 438int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
432int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); 439int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
433unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); 440unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
434 441
435bool amdgpu_fence_signaled(struct amdgpu_fence *fence); 442signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
436int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
437int amdgpu_fence_wait_any(struct amdgpu_device *adev,
438 struct amdgpu_fence **fences, 443 struct amdgpu_fence **fences,
439 bool intr); 444 bool intr, long t);
440struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence); 445struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
441void amdgpu_fence_unref(struct amdgpu_fence **fence); 446void amdgpu_fence_unref(struct amdgpu_fence **fence);
442 447
@@ -481,7 +486,7 @@ static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
481 return a->seq < b->seq; 486 return a->seq < b->seq;
482} 487}
483 488
484int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user, 489int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
485 void *owner, struct amdgpu_fence **fence); 490 void *owner, struct amdgpu_fence **fence);
486 491
487/* 492/*
@@ -532,14 +537,16 @@ struct amdgpu_bo_va_mapping {
532struct amdgpu_bo_va { 537struct amdgpu_bo_va {
533 /* protected by bo being reserved */ 538 /* protected by bo being reserved */
534 struct list_head bo_list; 539 struct list_head bo_list;
535 uint64_t addr; 540 struct fence *last_pt_update;
536 struct amdgpu_fence *last_pt_update;
537 unsigned ref_count; 541 unsigned ref_count;
538 542
539 /* protected by vm mutex */ 543 /* protected by vm mutex and spinlock */
540 struct list_head mappings;
541 struct list_head vm_status; 544 struct list_head vm_status;
542 545
546 /* mappings for this bo_va */
547 struct list_head invalids;
548 struct list_head valids;
549
543 /* constant after initialization */ 550 /* constant after initialization */
544 struct amdgpu_vm *vm; 551 struct amdgpu_vm *vm;
545 struct amdgpu_bo *bo; 552 struct amdgpu_bo *bo;
@@ -697,8 +704,8 @@ struct amdgpu_sync {
697}; 704};
698 705
699void amdgpu_sync_create(struct amdgpu_sync *sync); 706void amdgpu_sync_create(struct amdgpu_sync *sync);
700void amdgpu_sync_fence(struct amdgpu_sync *sync, 707int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
701 struct amdgpu_fence *fence); 708 struct fence *f);
702int amdgpu_sync_resv(struct amdgpu_device *adev, 709int amdgpu_sync_resv(struct amdgpu_device *adev,
703 struct amdgpu_sync *sync, 710 struct amdgpu_sync *sync,
704 struct reservation_object *resv, 711 struct reservation_object *resv,
@@ -821,7 +828,9 @@ struct amdgpu_flip_work {
821 uint64_t base; 828 uint64_t base;
822 struct drm_pending_vblank_event *event; 829 struct drm_pending_vblank_event *event;
823 struct amdgpu_bo *old_rbo; 830 struct amdgpu_bo *old_rbo;
824 struct fence *fence; 831 struct fence *excl;
832 unsigned shared_count;
833 struct fence **shared;
825}; 834};
826 835
827 836
@@ -844,6 +853,8 @@ struct amdgpu_ib {
844 uint32_t gws_base, gws_size; 853 uint32_t gws_base, gws_size;
845 uint32_t oa_base, oa_size; 854 uint32_t oa_base, oa_size;
846 uint32_t flags; 855 uint32_t flags;
856 /* resulting sequence number */
857 uint64_t sequence;
847}; 858};
848 859
849enum amdgpu_ring_type { 860enum amdgpu_ring_type {
@@ -854,11 +865,23 @@ enum amdgpu_ring_type {
854 AMDGPU_RING_TYPE_VCE 865 AMDGPU_RING_TYPE_VCE
855}; 866};
856 867
868extern struct amd_sched_backend_ops amdgpu_sched_ops;
869
870int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
871 struct amdgpu_ring *ring,
872 struct amdgpu_ib *ibs,
873 unsigned num_ibs,
874 int (*free_job)(struct amdgpu_cs_parser *),
875 void *owner,
876 struct fence **fence);
877
857struct amdgpu_ring { 878struct amdgpu_ring {
858 struct amdgpu_device *adev; 879 struct amdgpu_device *adev;
859 const struct amdgpu_ring_funcs *funcs; 880 const struct amdgpu_ring_funcs *funcs;
860 struct amdgpu_fence_driver fence_drv; 881 struct amdgpu_fence_driver fence_drv;
882 struct amd_gpu_scheduler *scheduler;
861 883
884 spinlock_t fence_lock;
862 struct mutex *ring_lock; 885 struct mutex *ring_lock;
863 struct amdgpu_bo *ring_obj; 886 struct amdgpu_bo *ring_obj;
864 volatile uint32_t *ring; 887 volatile uint32_t *ring;
@@ -892,6 +915,7 @@ struct amdgpu_ring {
892 struct amdgpu_ctx *current_ctx; 915 struct amdgpu_ctx *current_ctx;
893 enum amdgpu_ring_type type; 916 enum amdgpu_ring_type type;
894 char name[16]; 917 char name[16];
918 bool is_pte_ring;
895}; 919};
896 920
897/* 921/*
@@ -943,18 +967,22 @@ struct amdgpu_vm {
943 967
944 struct rb_root va; 968 struct rb_root va;
945 969
946 /* protecting invalidated and freed */ 970 /* protecting invalidated */
947 spinlock_t status_lock; 971 spinlock_t status_lock;
948 972
949 /* BOs moved, but not yet updated in the PT */ 973 /* BOs moved, but not yet updated in the PT */
950 struct list_head invalidated; 974 struct list_head invalidated;
951 975
952 /* BOs freed, but not yet updated in the PT */ 976 /* BOs cleared in the PT because of a move */
977 struct list_head cleared;
978
979 /* BO mappings freed, but not yet updated in the PT */
953 struct list_head freed; 980 struct list_head freed;
954 981
955 /* contains the page directory */ 982 /* contains the page directory */
956 struct amdgpu_bo *page_directory; 983 struct amdgpu_bo *page_directory;
957 unsigned max_pde_used; 984 unsigned max_pde_used;
985 struct fence *page_directory_fence;
958 986
959 /* array of page tables, one for each page directory entry */ 987 /* array of page tables, one for each page directory entry */
960 struct amdgpu_vm_pt *page_tables; 988 struct amdgpu_vm_pt *page_tables;
@@ -983,27 +1011,47 @@ struct amdgpu_vm_manager {
983 * context related structures 1011 * context related structures
984 */ 1012 */
985 1013
986struct amdgpu_ctx_state { 1014#define AMDGPU_CTX_MAX_CS_PENDING 16
987 uint64_t flags; 1015
988 uint32_t hangs; 1016struct amdgpu_ctx_ring {
1017 uint64_t sequence;
1018 struct fence *fences[AMDGPU_CTX_MAX_CS_PENDING];
1019 struct amd_sched_entity entity;
989}; 1020};
990 1021
991struct amdgpu_ctx { 1022struct amdgpu_ctx {
992 /* call kref_get()before CS start and kref_put() after CS fence signaled */ 1023 struct kref refcount;
993 struct kref refcount; 1024 struct amdgpu_device *adev;
994 struct amdgpu_fpriv *fpriv; 1025 unsigned reset_counter;
995 struct amdgpu_ctx_state state; 1026 spinlock_t ring_lock;
996 uint32_t id; 1027 struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
997 unsigned reset_counter;
998}; 1028};
999 1029
1000struct amdgpu_ctx_mgr { 1030struct amdgpu_ctx_mgr {
1001 struct amdgpu_device *adev; 1031 struct amdgpu_device *adev;
1002 struct idr ctx_handles; 1032 struct mutex lock;
1003 /* lock for IDR system */ 1033 /* protected by lock */
1004 struct mutex lock; 1034 struct idr ctx_handles;
1005}; 1035};
1006 1036
1037int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
1038 struct amdgpu_ctx *ctx);
1039void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
1040
1041struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
1042int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
1043
1044uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
1045 struct fence *fence, uint64_t queued_seq);
1046struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
1047 struct amdgpu_ring *ring, uint64_t seq);
1048
1049int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
1050 struct drm_file *filp);
1051
1052void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
1053void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
1054
1007/* 1055/*
1008 * file private structure 1056 * file private structure
1009 */ 1057 */
@@ -1012,7 +1060,7 @@ struct amdgpu_fpriv {
1012 struct amdgpu_vm vm; 1060 struct amdgpu_vm vm;
1013 struct mutex bo_list_lock; 1061 struct mutex bo_list_lock;
1014 struct idr bo_list_handles; 1062 struct idr bo_list_handles;
1015 struct amdgpu_ctx_mgr ctx_mgr; 1063 struct amdgpu_ctx_mgr ctx_mgr;
1016}; 1064};
1017 1065
1018/* 1066/*
@@ -1030,6 +1078,8 @@ struct amdgpu_bo_list {
1030}; 1078};
1031 1079
1032struct amdgpu_bo_list * 1080struct amdgpu_bo_list *
1081amdgpu_bo_list_clone(struct amdgpu_bo_list *list);
1082struct amdgpu_bo_list *
1033amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id); 1083amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
1034void amdgpu_bo_list_put(struct amdgpu_bo_list *list); 1084void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
1035void amdgpu_bo_list_free(struct amdgpu_bo_list *list); 1085void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
@@ -1205,6 +1255,14 @@ struct amdgpu_cs_parser {
1205 1255
1206 /* user fence */ 1256 /* user fence */
1207 struct amdgpu_user_fence uf; 1257 struct amdgpu_user_fence uf;
1258
1259 struct amdgpu_ring *ring;
1260 struct mutex job_lock;
1261 struct work_struct job_work;
1262 int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
1263 int (*run_job)(struct amdgpu_cs_parser *sched_job);
1264 int (*free_job)(struct amdgpu_cs_parser *sched_job);
1265 struct amd_sched_fence *s_fence;
1208}; 1266};
1209 1267
1210static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx) 1268static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
@@ -1849,17 +1907,12 @@ struct amdgpu_atcs {
1849 struct amdgpu_atcs_functions functions; 1907 struct amdgpu_atcs_functions functions;
1850}; 1908};
1851 1909
1852int amdgpu_ctx_alloc(struct amdgpu_device *adev,struct amdgpu_fpriv *fpriv, 1910/*
1853 uint32_t *id,uint32_t flags); 1911 * CGS
1854int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, 1912 */
1855 uint32_t id); 1913void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
1856 1914void amdgpu_cgs_destroy_device(void *cgs_device);
1857void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);
1858struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
1859int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
1860 1915
1861extern int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
1862 struct drm_file *filp);
1863 1916
1864/* 1917/*
1865 * Core structure, functions and helpers. 1918 * Core structure, functions and helpers.
@@ -1883,7 +1936,7 @@ struct amdgpu_device {
1883 struct rw_semaphore exclusive_lock; 1936 struct rw_semaphore exclusive_lock;
1884 1937
1885 /* ASIC */ 1938 /* ASIC */
1886 enum amdgpu_asic_type asic_type; 1939 enum amd_asic_type asic_type;
1887 uint32_t family; 1940 uint32_t family;
1888 uint32_t rev_id; 1941 uint32_t rev_id;
1889 uint32_t external_rev_id; 1942 uint32_t external_rev_id;
@@ -1976,7 +2029,6 @@ struct amdgpu_device {
1976 struct amdgpu_irq_src hpd_irq; 2029 struct amdgpu_irq_src hpd_irq;
1977 2030
1978 /* rings */ 2031 /* rings */
1979 wait_queue_head_t fence_queue;
1980 unsigned fence_context; 2032 unsigned fence_context;
1981 struct mutex ring_lock; 2033 struct mutex ring_lock;
1982 unsigned num_rings; 2034 unsigned num_rings;
@@ -2028,6 +2080,9 @@ struct amdgpu_device {
2028 2080
2029 /* amdkfd interface */ 2081 /* amdkfd interface */
2030 struct kfd_dev *kfd; 2082 struct kfd_dev *kfd;
2083
2084 /* kernel context for IB submission */
2085 struct amdgpu_ctx kernel_ctx;
2031}; 2086};
2032 2087
2033bool amdgpu_device_is_px(struct drm_device *dev); 2088bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2215,6 +2270,12 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
2215bool amdgpu_card_posted(struct amdgpu_device *adev); 2270bool amdgpu_card_posted(struct amdgpu_device *adev);
2216void amdgpu_update_display_priority(struct amdgpu_device *adev); 2271void amdgpu_update_display_priority(struct amdgpu_device *adev);
2217bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); 2272bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
2273struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
2274 struct drm_file *filp,
2275 struct amdgpu_ctx *ctx,
2276 struct amdgpu_ib *ibs,
2277 uint32_t num_ibs);
2278
2218int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); 2279int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
2219int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, 2280int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
2220 u32 ip_instance, u32 ring, 2281 u32 ip_instance, u32 ring,
@@ -2278,8 +2339,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
2278struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, 2339struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
2279 struct amdgpu_vm *vm, 2340 struct amdgpu_vm *vm,
2280 struct list_head *head); 2341 struct list_head *head);
2281struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring, 2342int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
2282 struct amdgpu_vm *vm); 2343 struct amdgpu_sync *sync);
2283void amdgpu_vm_flush(struct amdgpu_ring *ring, 2344void amdgpu_vm_flush(struct amdgpu_ring *ring,
2284 struct amdgpu_vm *vm, 2345 struct amdgpu_vm *vm,
2285 struct amdgpu_fence *updates); 2346 struct amdgpu_fence *updates);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index bc763e0c8f4c..496ed2192eba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -21,7 +21,7 @@
21 */ 21 */
22 22
23#include "amdgpu_amdkfd.h" 23#include "amdgpu_amdkfd.h"
24#include "amdgpu_family.h" 24#include "amd_shared.h"
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include "amdgpu.h" 26#include "amdgpu.h"
27#include <linux/module.h> 27#include <linux/module.h>
@@ -50,9 +50,11 @@ bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
50#endif 50#endif
51 51
52 switch (rdev->asic_type) { 52 switch (rdev->asic_type) {
53#ifdef CONFIG_DRM_AMDGPU_CIK
53 case CHIP_KAVERI: 54 case CHIP_KAVERI:
54 kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions(); 55 kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
55 break; 56 break;
57#endif
56 case CHIP_CARRIZO: 58 case CHIP_CARRIZO:
57 kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); 59 kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
58 break; 60 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 2daad335b809..dd2037bc0b4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -450,7 +450,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
450 450
451 while (true) { 451 while (true) {
452 temp = RREG32(mmCP_HQD_ACTIVE); 452 temp = RREG32(mmCP_HQD_ACTIVE);
453 if (temp & CP_HQD_ACTIVE__ACTIVE__SHIFT) 453 if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
454 break; 454 break;
455 if (timeout == 0) { 455 if (timeout == 0) {
456 pr_err("kfd: cp queue preemption time out (%dms)\n", 456 pr_err("kfd: cp queue preemption time out (%dms)\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 6a588371d54a..77f1d7c6ea3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -897,7 +897,7 @@ bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
897 if ((id == ASIC_INTERNAL_ENGINE_SS) || 897 if ((id == ASIC_INTERNAL_ENGINE_SS) ||
898 (id == ASIC_INTERNAL_MEMORY_SS)) 898 (id == ASIC_INTERNAL_MEMORY_SS))
899 ss->rate /= 100; 899 ss->rate /= 100;
900 if (adev->flags & AMDGPU_IS_APU) 900 if (adev->flags & AMD_IS_APU)
901 amdgpu_atombios_get_igp_ss_overrides(adev, ss, id); 901 amdgpu_atombios_get_igp_ss_overrides(adev, ss, id);
902 return true; 902 return true;
903 } 903 }
@@ -1058,7 +1058,7 @@ void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
1058 SET_MEMORY_CLOCK_PS_ALLOCATION args; 1058 SET_MEMORY_CLOCK_PS_ALLOCATION args;
1059 int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock); 1059 int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
1060 1060
1061 if (adev->flags & AMDGPU_IS_APU) 1061 if (adev->flags & AMD_IS_APU)
1062 return; 1062 return;
1063 1063
1064 args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */ 1064 args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 2742b9a35cbc..759482e4300d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -42,7 +42,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
42 r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence); 42 r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
43 if (r) 43 if (r)
44 goto exit_do_move; 44 goto exit_do_move;
45 r = amdgpu_fence_wait(fence, false); 45 r = fence_wait(&fence->base, false);
46 if (r) 46 if (r)
47 goto exit_do_move; 47 goto exit_do_move;
48 amdgpu_fence_unref(&fence); 48 amdgpu_fence_unref(&fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index ceb444f6d418..02add0a508cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -48,7 +48,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
 	resource_size_t vram_base;
 	resource_size_t size = 256 * 1024; /* ??? */
 
-	if (!(adev->flags & AMDGPU_IS_APU))
+	if (!(adev->flags & AMD_IS_APU))
 		if (!amdgpu_card_posted(adev))
 			return false;
 
@@ -184,7 +184,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
 	bool found = false;
 
 	/* ATRM is for the discrete card only */
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return false;
 
 	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
@@ -246,7 +246,7 @@ static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
 
 static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
 {
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return igp_read_bios_from_vram(adev);
 	else
 		return amdgpu_asic_read_disabled_bios(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index f82a2dd83874..7eed523bf28f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -62,6 +62,39 @@ static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
 	return 0;
 }
 
+struct amdgpu_bo_list *
+amdgpu_bo_list_clone(struct amdgpu_bo_list *list)
+{
+	struct amdgpu_bo_list *result;
+	unsigned i;
+
+	result = kmalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
+	if (!result)
+		return NULL;
+
+	result->array = drm_calloc_large(list->num_entries,
+			sizeof(struct amdgpu_bo_list_entry));
+	if (!result->array) {
+		kfree(result);
+		return NULL;
+	}
+
+	mutex_init(&result->lock);
+	result->gds_obj = list->gds_obj;
+	result->gws_obj = list->gws_obj;
+	result->oa_obj = list->oa_obj;
+	result->has_userptr = list->has_userptr;
+	result->num_entries = list->num_entries;
+
+	memcpy(result->array, list->array, list->num_entries *
+	       sizeof(struct amdgpu_bo_list_entry));
+
+	for (i = 0; i < result->num_entries; ++i)
+		amdgpu_bo_ref(result->array[i].robj);
+
+	return result;
+}
+
 static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
 {
 	struct amdgpu_bo_list *list;
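The new amdgpu_bo_list_clone() above exists because, with the GPU scheduler enabled, a submission can outlive the ioctl that created it, so the parser takes a private deep copy of the BO list and grabs a reference on every buffer (its use appears in amdgpu_cs_parser_init further down). A generic sketch of the deep-copy-plus-refcount pattern, with stand-in types rather than the kernel structures:

    #include <stdlib.h>
    #include <string.h>

    struct obj { int refcount; };

    struct list {
    	struct obj **array;
    	unsigned num_entries;
    };

    static struct list *list_clone(const struct list *src)
    {
    	struct list *dst = malloc(sizeof(*dst));
    	unsigned i;

    	if (!dst)
    		return NULL;
    	dst->array = calloc(src->num_entries, sizeof(*dst->array));
    	if (!dst->array) {
    		free(dst);
    		return NULL;
    	}
    	dst->num_entries = src->num_entries;
    	memcpy(dst->array, src->array,
    	       src->num_entries * sizeof(*dst->array));
    	for (i = 0; i < dst->num_entries; ++i)
    		dst->array[i]->refcount++;	/* each entry holds a reference */
    	return dst;
    }

    int main(void)
    {
    	struct obj a = { 1 }, b = { 1 };
    	struct obj *arr[] = { &a, &b };
    	struct list src = { arr, 2 };
    	struct list *dst = list_clone(&src);

    	return (dst && a.refcount == 2 && b.refcount == 2) ? 0 : 1;
    }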
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
new file mode 100644
index 000000000000..6b1243f9f86d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -0,0 +1,838 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 *
23 */
24#include <linux/list.h>
25#include <linux/slab.h>
26#include <linux/pci.h>
27#include <drm/drmP.h>
28#include <linux/firmware.h>
29#include <drm/amdgpu_drm.h>
30#include "amdgpu.h"
31#include "cgs_linux.h"
32#include "atom.h"
33#include "amdgpu_ucode.h"
34
35
36struct amdgpu_cgs_device {
37 struct cgs_device base;
38 struct amdgpu_device *adev;
39};
40
41#define CGS_FUNC_ADEV \
42 struct amdgpu_device *adev = \
43 ((struct amdgpu_cgs_device *)cgs_device)->adev
44
45static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
46 uint64_t *mc_start, uint64_t *mc_size,
47 uint64_t *mem_size)
48{
49 CGS_FUNC_ADEV;
50 switch(type) {
51 case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
52 case CGS_GPU_MEM_TYPE__VISIBLE_FB:
53 *mc_start = 0;
54 *mc_size = adev->mc.visible_vram_size;
55 *mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
56 break;
57 case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
58 case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
59 *mc_start = adev->mc.visible_vram_size;
60 *mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
61 *mem_size = *mc_size;
62 break;
63 case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
64 case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
65 *mc_start = adev->mc.gtt_start;
66 *mc_size = adev->mc.gtt_size;
67 *mem_size = adev->mc.gtt_size - adev->gart_pin_size;
68 break;
69 default:
70 return -EINVAL;
71 }
72
73 return 0;
74}
75
76static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
77 uint64_t size,
78 uint64_t min_offset, uint64_t max_offset,
79 cgs_handle_t *kmem_handle, uint64_t *mcaddr)
80{
81 CGS_FUNC_ADEV;
82 int ret;
83 struct amdgpu_bo *bo;
84 struct page *kmem_page = vmalloc_to_page(kmem);
85 int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
86
87 struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
88 ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
89 AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
90 if (ret)
91 return ret;
92 ret = amdgpu_bo_reserve(bo, false);
93 if (unlikely(ret != 0))
94 return ret;
95
96 /* pin buffer into GTT */
97 ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
98 min_offset, max_offset, mcaddr);
99 amdgpu_bo_unreserve(bo);
100
101 *kmem_handle = (cgs_handle_t)bo;
102 return ret;
103}
104
105static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
106{
107 struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
108
109 if (obj) {
110 int r = amdgpu_bo_reserve(obj, false);
111 if (likely(r == 0)) {
112 amdgpu_bo_unpin(obj);
113 amdgpu_bo_unreserve(obj);
114 }
115 amdgpu_bo_unref(&obj);
116
117 }
118 return 0;
119}
120
121static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
122 enum cgs_gpu_mem_type type,
123 uint64_t size, uint64_t align,
124 uint64_t min_offset, uint64_t max_offset,
125 cgs_handle_t *handle)
126{
127 CGS_FUNC_ADEV;
128 uint16_t flags = 0;
129 int ret = 0;
130 uint32_t domain = 0;
131 struct amdgpu_bo *obj;
132 struct ttm_placement placement;
133 struct ttm_place place;
134
135 if (min_offset > max_offset) {
136 BUG_ON(1);
137 return -EINVAL;
138 }
139
140 /* fail if the alignment is not a power of 2 */
141 if (((align != 1) && (align & (align - 1)))
142 || size == 0 || align == 0)
143 return -EINVAL;
144
145
146 switch(type) {
147 case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
148 case CGS_GPU_MEM_TYPE__VISIBLE_FB:
149 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
150 domain = AMDGPU_GEM_DOMAIN_VRAM;
151 if (max_offset > adev->mc.real_vram_size)
152 return -EINVAL;
153 place.fpfn = min_offset >> PAGE_SHIFT;
154 place.lpfn = max_offset >> PAGE_SHIFT;
155 place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
156 TTM_PL_FLAG_VRAM;
157 break;
158 case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
159 case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
160 flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
161 domain = AMDGPU_GEM_DOMAIN_VRAM;
162 if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
163 place.fpfn =
164 max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
165 place.lpfn =
166 min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
167 place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
168 TTM_PL_FLAG_VRAM;
169 }
170
171 break;
172 case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
173 domain = AMDGPU_GEM_DOMAIN_GTT;
174 place.fpfn = min_offset >> PAGE_SHIFT;
175 place.lpfn = max_offset >> PAGE_SHIFT;
176 place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
177 break;
178 case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
179 flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
180 domain = AMDGPU_GEM_DOMAIN_GTT;
181 place.fpfn = min_offset >> PAGE_SHIFT;
182 place.lpfn = max_offset >> PAGE_SHIFT;
183 place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
184 TTM_PL_FLAG_UNCACHED;
185 break;
186 default:
187 return -EINVAL;
188 }
189
190
191 *handle = 0;
192
193 placement.placement = &place;
194 placement.num_placement = 1;
195 placement.busy_placement = &place;
196 placement.num_busy_placement = 1;
197
198 ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
199 true, domain, flags,
200 NULL, &placement, &obj);
201 if (ret) {
202 DRM_ERROR("(%d) bo create failed\n", ret);
203 return ret;
204 }
205 *handle = (cgs_handle_t)obj;
206
207 return ret;
208}
209
210static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
211 cgs_handle_t *handle)
212{
213 CGS_FUNC_ADEV;
214 int r;
215 uint32_t dma_handle;
216 struct drm_gem_object *obj;
217 struct amdgpu_bo *bo;
218 struct drm_device *dev = adev->ddev;
219 struct drm_file *file_priv = NULL, *priv;
220
221 mutex_lock(&dev->struct_mutex);
222 list_for_each_entry(priv, &dev->filelist, lhead) {
223 rcu_read_lock();
224 if (priv->pid == get_pid(task_pid(current)))
225 file_priv = priv;
226 rcu_read_unlock();
227 if (file_priv)
228 break;
229 }
230 mutex_unlock(&dev->struct_mutex);
231 r = dev->driver->prime_fd_to_handle(dev,
232 file_priv, dmabuf_fd,
233 &dma_handle);
234 spin_lock(&file_priv->table_lock);
235
236 /* Check if we currently have a reference on the object */
237 obj = idr_find(&file_priv->object_idr, dma_handle);
238 if (obj == NULL) {
239 spin_unlock(&file_priv->table_lock);
240 return -EINVAL;
241 }
242 spin_unlock(&file_priv->table_lock);
243 bo = gem_to_amdgpu_bo(obj);
244 *handle = (cgs_handle_t)bo;
245 return 0;
246}
247
248static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
249{
250 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
251
252 if (obj) {
253 int r = amdgpu_bo_reserve(obj, false);
254 if (likely(r == 0)) {
255 amdgpu_bo_kunmap(obj);
256 amdgpu_bo_unpin(obj);
257 amdgpu_bo_unreserve(obj);
258 }
259 amdgpu_bo_unref(&obj);
260
261 }
262 return 0;
263}
264
265static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
266 uint64_t *mcaddr)
267{
268 int r;
269 u64 min_offset, max_offset;
270 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
271
272 WARN_ON_ONCE(obj->placement.num_placement > 1);
273
274 min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
275 max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
276
277 r = amdgpu_bo_reserve(obj, false);
278 if (unlikely(r != 0))
279 return r;
280 r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
281 min_offset, max_offset, mcaddr);
282 amdgpu_bo_unreserve(obj);
283 return r;
284}
285
286static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
287{
288 int r;
289 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
290 r = amdgpu_bo_reserve(obj, false);
291 if (unlikely(r != 0))
292 return r;
293 r = amdgpu_bo_unpin(obj);
294 amdgpu_bo_unreserve(obj);
295 return r;
296}
297
298static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
299 void **map)
300{
301 int r;
302 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
303 r = amdgpu_bo_reserve(obj, false);
304 if (unlikely(r != 0))
305 return r;
306 r = amdgpu_bo_kmap(obj, map);
307 amdgpu_bo_unreserve(obj);
308 return r;
309}
310
311static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
312{
313 int r;
314 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
315 r = amdgpu_bo_reserve(obj, false);
316 if (unlikely(r != 0))
317 return r;
318 amdgpu_bo_kunmap(obj);
319 amdgpu_bo_unreserve(obj);
320 return r;
321}
322
323static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
324{
325 CGS_FUNC_ADEV;
326 return RREG32(offset);
327}
328
329static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
330 uint32_t value)
331{
332 CGS_FUNC_ADEV;
333 WREG32(offset, value);
334}
335
336static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
337 enum cgs_ind_reg space,
338 unsigned index)
339{
340 CGS_FUNC_ADEV;
341 switch (space) {
342 case CGS_IND_REG__MMIO:
343 return RREG32_IDX(index);
344 case CGS_IND_REG__PCIE:
345 return RREG32_PCIE(index);
346 case CGS_IND_REG__SMC:
347 return RREG32_SMC(index);
348 case CGS_IND_REG__UVD_CTX:
349 return RREG32_UVD_CTX(index);
350 case CGS_IND_REG__DIDT:
351 return RREG32_DIDT(index);
352 case CGS_IND_REG__AUDIO_ENDPT:
353 DRM_ERROR("audio endpt register access not implemented.\n");
354 return 0;
355 }
356 WARN(1, "Invalid indirect register space");
357 return 0;
358}
359
360static void amdgpu_cgs_write_ind_register(void *cgs_device,
361 enum cgs_ind_reg space,
362 unsigned index, uint32_t value)
363{
364 CGS_FUNC_ADEV;
365 switch (space) {
366 case CGS_IND_REG__MMIO:
367 return WREG32_IDX(index, value);
368 case CGS_IND_REG__PCIE:
369 return WREG32_PCIE(index, value);
370 case CGS_IND_REG__SMC:
371 return WREG32_SMC(index, value);
372 case CGS_IND_REG__UVD_CTX:
373 return WREG32_UVD_CTX(index, value);
374 case CGS_IND_REG__DIDT:
375 return WREG32_DIDT(index, value);
376 case CGS_IND_REG__AUDIO_ENDPT:
377 DRM_ERROR("audio endpt register access not implemented.\n");
378 return;
379 }
380 WARN(1, "Invalid indirect register space");
381}
382
383static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
384{
385 CGS_FUNC_ADEV;
386 uint8_t val;
387 int ret = pci_read_config_byte(adev->pdev, addr, &val);
388 if (WARN(ret, "pci_read_config_byte error"))
389 return 0;
390 return val;
391}
392
393static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
394{
395 CGS_FUNC_ADEV;
396 uint16_t val;
397 int ret = pci_read_config_word(adev->pdev, addr, &val);
398 if (WARN(ret, "pci_read_config_word error"))
399 return 0;
400 return val;
401}
402
403static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
404 unsigned addr)
405{
406 CGS_FUNC_ADEV;
407 uint32_t val;
408 int ret = pci_read_config_dword(adev->pdev, addr, &val);
409 if (WARN(ret, "pci_read_config_dword error"))
410 return 0;
411 return val;
412}
413
414static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
415 uint8_t value)
416{
417 CGS_FUNC_ADEV;
418 int ret = pci_write_config_byte(adev->pdev, addr, value);
419 WARN(ret, "pci_write_config_byte error");
420}
421
422static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
423 uint16_t value)
424{
425 CGS_FUNC_ADEV;
426 int ret = pci_write_config_word(adev->pdev, addr, value);
427 WARN(ret, "pci_write_config_word error");
428}
429
430static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
431 uint32_t value)
432{
433 CGS_FUNC_ADEV;
434 int ret = pci_write_config_dword(adev->pdev, addr, value);
435 WARN(ret, "pci_write_config_dword error");
436}
437
438static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
439 unsigned table, uint16_t *size,
440 uint8_t *frev, uint8_t *crev)
441{
442 CGS_FUNC_ADEV;
443 uint16_t data_start;
444
445 if (amdgpu_atom_parse_data_header(
446 adev->mode_info.atom_context, table, size,
447 frev, crev, &data_start))
448 return (uint8_t*)adev->mode_info.atom_context->bios +
449 data_start;
450
451 return NULL;
452}
453
454static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
455 uint8_t *frev, uint8_t *crev)
456{
457 CGS_FUNC_ADEV;
458
459 if (amdgpu_atom_parse_cmd_header(
460 adev->mode_info.atom_context, table,
461 frev, crev))
462 return 0;
463
464 return -EINVAL;
465}
466
467static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
468 void *args)
469{
470 CGS_FUNC_ADEV;
471
472 return amdgpu_atom_execute_table(
473 adev->mode_info.atom_context, table, args);
474}
475
476static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
477{
478 /* TODO */
479 return 0;
480}
481
482static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
483{
484 /* TODO */
485 return 0;
486}
487
488static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
489 int active)
490{
491 /* TODO */
492 return 0;
493}
494
495static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
496 enum cgs_clock clock, unsigned freq)
497{
498 /* TODO */
499 return 0;
500}
501
502static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
503 enum cgs_engine engine, int powered)
504{
505 /* TODO */
506 return 0;
507}
508
509
510
511static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
512 enum cgs_clock clock,
513 struct cgs_clock_limits *limits)
514{
515 /* TODO */
516 return 0;
517}
518
519static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
520 const uint32_t *voltages)
521{
522 DRM_ERROR("not implemented");
523 return -EPERM;
524}
525
526struct cgs_irq_params {
527 unsigned src_id;
528 cgs_irq_source_set_func_t set;
529 cgs_irq_handler_func_t handler;
530 void *private_data;
531};
532
533static int cgs_set_irq_state(struct amdgpu_device *adev,
534 struct amdgpu_irq_src *src,
535 unsigned type,
536 enum amdgpu_interrupt_state state)
537{
538 struct cgs_irq_params *irq_params =
539 (struct cgs_irq_params *)src->data;
540 if (!irq_params)
541 return -EINVAL;
542 if (!irq_params->set)
543 return -EINVAL;
544 return irq_params->set(irq_params->private_data,
545 irq_params->src_id,
546 type,
547 (int)state);
548}
549
550static int cgs_process_irq(struct amdgpu_device *adev,
551 struct amdgpu_irq_src *source,
552 struct amdgpu_iv_entry *entry)
553{
554 struct cgs_irq_params *irq_params =
555 (struct cgs_irq_params *)source->data;
556 if (!irq_params)
557 return -EINVAL;
558 if (!irq_params->handler)
559 return -EINVAL;
560 return irq_params->handler(irq_params->private_data,
561 irq_params->src_id,
562 entry->iv_entry);
563}
564
565static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
566 .set = cgs_set_irq_state,
567 .process = cgs_process_irq,
568};
569
570static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
571 unsigned num_types,
572 cgs_irq_source_set_func_t set,
573 cgs_irq_handler_func_t handler,
574 void *private_data)
575{
576 CGS_FUNC_ADEV;
577 int ret = 0;
578 struct cgs_irq_params *irq_params;
579 struct amdgpu_irq_src *source =
580 kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
581 if (!source)
582 return -ENOMEM;
583 irq_params =
584 kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
585 if (!irq_params) {
586 kfree(source);
587 return -ENOMEM;
588 }
589 source->num_types = num_types;
590 source->funcs = &cgs_irq_funcs;
591 irq_params->src_id = src_id;
592 irq_params->set = set;
593 irq_params->handler = handler;
594 irq_params->private_data = private_data;
595 source->data = (void *)irq_params;
596 ret = amdgpu_irq_add_id(adev, src_id, source);
597 if (ret) {
598 kfree(irq_params);
599 kfree(source);
600 }
601
602 return ret;
603}
604
605static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
606{
607 CGS_FUNC_ADEV;
608 return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
609}
610
611static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
612{
613 CGS_FUNC_ADEV;
614 return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
615}
616
617int amdgpu_cgs_set_clockgating_state(void *cgs_device,
618 enum amd_ip_block_type block_type,
619 enum amd_clockgating_state state)
620{
621 CGS_FUNC_ADEV;
622 int i, r = -1;
623
624 for (i = 0; i < adev->num_ip_blocks; i++) {
625 if (!adev->ip_block_status[i].valid)
626 continue;
627
628 if (adev->ip_blocks[i].type == block_type) {
629 r = adev->ip_blocks[i].funcs->set_clockgating_state(
630 (void *)adev,
631 state);
632 break;
633 }
634 }
635 return r;
636}
637
638int amdgpu_cgs_set_powergating_state(void *cgs_device,
639 enum amd_ip_block_type block_type,
640 enum amd_powergating_state state)
641{
642 CGS_FUNC_ADEV;
643 int i, r = -1;
644
645 for (i = 0; i < adev->num_ip_blocks; i++) {
646 if (!adev->ip_block_status[i].valid)
647 continue;
648
649 if (adev->ip_blocks[i].type == block_type) {
650 r = adev->ip_blocks[i].funcs->set_powergating_state(
651 (void *)adev,
652 state);
653 break;
654 }
655 }
656 return r;
657}
658
659
660static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
661{
662 CGS_FUNC_ADEV;
663 enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
664
665 switch (fw_type) {
666 case CGS_UCODE_ID_SDMA0:
667 result = AMDGPU_UCODE_ID_SDMA0;
668 break;
669 case CGS_UCODE_ID_SDMA1:
670 result = AMDGPU_UCODE_ID_SDMA1;
671 break;
672 case CGS_UCODE_ID_CP_CE:
673 result = AMDGPU_UCODE_ID_CP_CE;
674 break;
675 case CGS_UCODE_ID_CP_PFP:
676 result = AMDGPU_UCODE_ID_CP_PFP;
677 break;
678 case CGS_UCODE_ID_CP_ME:
679 result = AMDGPU_UCODE_ID_CP_ME;
680 break;
681 case CGS_UCODE_ID_CP_MEC:
682 case CGS_UCODE_ID_CP_MEC_JT1:
683 result = AMDGPU_UCODE_ID_CP_MEC1;
684 break;
685 case CGS_UCODE_ID_CP_MEC_JT2:
686 if (adev->asic_type == CHIP_TONGA)
687 result = AMDGPU_UCODE_ID_CP_MEC2;
688 else if (adev->asic_type == CHIP_CARRIZO)
689 result = AMDGPU_UCODE_ID_CP_MEC1;
690 break;
691 case CGS_UCODE_ID_RLC_G:
692 result = AMDGPU_UCODE_ID_RLC_G;
693 break;
694 default:
695 DRM_ERROR("Firmware type not supported\n");
696 }
697 return result;
698}
699
700static int amdgpu_cgs_get_firmware_info(void *cgs_device,
701 enum cgs_ucode_id type,
702 struct cgs_firmware_info *info)
703{
704 CGS_FUNC_ADEV;
705
706 if (CGS_UCODE_ID_SMU != type) {
707 uint64_t gpu_addr;
708 uint32_t data_size;
709 const struct gfx_firmware_header_v1_0 *header;
710 enum AMDGPU_UCODE_ID id;
711 struct amdgpu_firmware_info *ucode;
712
713 id = fw_type_convert(cgs_device, type);
714 ucode = &adev->firmware.ucode[id];
715 if (ucode->fw == NULL)
716 return -EINVAL;
717
718 gpu_addr = ucode->mc_addr;
719 header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
720 data_size = le32_to_cpu(header->header.ucode_size_bytes);
721
722 if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
723 (type == CGS_UCODE_ID_CP_MEC_JT2)) {
724 gpu_addr += le32_to_cpu(header->jt_offset) << 2;
725 data_size = le32_to_cpu(header->jt_size) << 2;
726 }
727 info->mc_addr = gpu_addr;
728 info->image_size = data_size;
729 info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
730 info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
731 } else {
732 char fw_name[30] = {0};
733 int err = 0;
734 uint32_t ucode_size;
735 uint32_t ucode_start_address;
736 const uint8_t *src;
737 const struct smc_firmware_header_v1_0 *hdr;
738
739 switch (adev->asic_type) {
740 case CHIP_TONGA:
741 strcpy(fw_name, "amdgpu/tonga_smc.bin");
742 break;
743 default:
744 DRM_ERROR("SMC firmware not supported\n");
745 return -EINVAL;
746 }
747
748 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
749 if (err) {
750 DRM_ERROR("Failed to request firmware\n");
751 return err;
752 }
753
754 err = amdgpu_ucode_validate(adev->pm.fw);
755 if (err) {
756 DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
757 release_firmware(adev->pm.fw);
758 adev->pm.fw = NULL;
759 return err;
760 }
761
762 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
763 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
764 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
765 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
766 src = (const uint8_t *)(adev->pm.fw->data +
767 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
768
769 info->version = adev->pm.fw_version;
770 info->image_size = ucode_size;
771 info->kptr = (void *)src;
772 }
773 return 0;
774}
775
776static const struct cgs_ops amdgpu_cgs_ops = {
777 amdgpu_cgs_gpu_mem_info,
778 amdgpu_cgs_gmap_kmem,
779 amdgpu_cgs_gunmap_kmem,
780 amdgpu_cgs_alloc_gpu_mem,
781 amdgpu_cgs_free_gpu_mem,
782 amdgpu_cgs_gmap_gpu_mem,
783 amdgpu_cgs_gunmap_gpu_mem,
784 amdgpu_cgs_kmap_gpu_mem,
785 amdgpu_cgs_kunmap_gpu_mem,
786 amdgpu_cgs_read_register,
787 amdgpu_cgs_write_register,
788 amdgpu_cgs_read_ind_register,
789 amdgpu_cgs_write_ind_register,
790 amdgpu_cgs_read_pci_config_byte,
791 amdgpu_cgs_read_pci_config_word,
792 amdgpu_cgs_read_pci_config_dword,
793 amdgpu_cgs_write_pci_config_byte,
794 amdgpu_cgs_write_pci_config_word,
795 amdgpu_cgs_write_pci_config_dword,
796 amdgpu_cgs_atom_get_data_table,
797 amdgpu_cgs_atom_get_cmd_table_revs,
798 amdgpu_cgs_atom_exec_cmd_table,
799 amdgpu_cgs_create_pm_request,
800 amdgpu_cgs_destroy_pm_request,
801 amdgpu_cgs_set_pm_request,
802 amdgpu_cgs_pm_request_clock,
803 amdgpu_cgs_pm_request_engine,
804 amdgpu_cgs_pm_query_clock_limits,
805 amdgpu_cgs_set_camera_voltages,
806 amdgpu_cgs_get_firmware_info,
807 amdgpu_cgs_set_powergating_state,
808 amdgpu_cgs_set_clockgating_state
809};
810
811static const struct cgs_os_ops amdgpu_cgs_os_ops = {
812 amdgpu_cgs_import_gpu_mem,
813 amdgpu_cgs_add_irq_source,
814 amdgpu_cgs_irq_get,
815 amdgpu_cgs_irq_put
816};
817
818void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
819{
820 struct amdgpu_cgs_device *cgs_device =
821 kmalloc(sizeof(*cgs_device), GFP_KERNEL);
822
823 if (!cgs_device) {
824 DRM_ERROR("Couldn't allocate CGS device structure\n");
825 return NULL;
826 }
827
828 cgs_device->base.ops = &amdgpu_cgs_ops;
829 cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
830 cgs_device->adev = adev;
831
832 return cgs_device;
833}
834
835void amdgpu_cgs_destroy_device(void *cgs_device)
836{
837 kfree(cgs_device);
838}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 1f040d85ac47..e4424b4db5d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -126,12 +126,54 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 	return 0;
 }
 
+static void amdgpu_job_work_func(struct work_struct *work)
+{
+	struct amdgpu_cs_parser *sched_job =
+		container_of(work, struct amdgpu_cs_parser,
+			     job_work);
+	mutex_lock(&sched_job->job_lock);
+	if (sched_job->free_job)
+		sched_job->free_job(sched_job);
+	mutex_unlock(&sched_job->job_lock);
+	/* after processing job, free memory */
+	fence_put(&sched_job->s_fence->base);
+	kfree(sched_job);
+}
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+						 struct drm_file *filp,
+						 struct amdgpu_ctx *ctx,
+						 struct amdgpu_ib *ibs,
+						 uint32_t num_ibs)
+{
+	struct amdgpu_cs_parser *parser;
+	int i;
+
+	parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
+	if (!parser)
+		return NULL;
+
+	parser->adev = adev;
+	parser->filp = filp;
+	parser->ctx = ctx;
+	parser->ibs = ibs;
+	parser->num_ibs = num_ibs;
+	if (amdgpu_enable_scheduler) {
+		mutex_init(&parser->job_lock);
+		INIT_WORK(&parser->job_work, amdgpu_job_work_func);
+	}
+	for (i = 0; i < num_ibs; i++)
+		ibs[i].ctx = ctx;
+
+	return parser;
+}
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
 	uint64_t *chunk_array_user;
 	uint64_t *chunk_array = NULL;
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+	struct amdgpu_bo_list *bo_list = NULL;
 	unsigned size, i;
 	int r = 0;
 
@@ -143,17 +185,30 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		r = -EINVAL;
 		goto out;
 	}
-	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
+	bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
+	if (!amdgpu_enable_scheduler)
+		p->bo_list = bo_list;
+	else {
+		if (bo_list && !bo_list->has_userptr) {
+			p->bo_list = amdgpu_bo_list_clone(bo_list);
+			amdgpu_bo_list_put(bo_list);
+			if (!p->bo_list)
+				return -ENOMEM;
+		} else if (bo_list && bo_list->has_userptr)
+			p->bo_list = bo_list;
+		else
+			p->bo_list = NULL;
+	}
 
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
-	chunk_array = kcalloc(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
 	if (chunk_array == NULL) {
 		r = -ENOMEM;
 		goto out;
 	}
 
-	chunk_array_user = (uint64_t *)(unsigned long)(cs->in.chunks);
+	chunk_array_user = (uint64_t __user *)(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
 		r = -EFAULT;
@@ -161,7 +216,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 	}
 
 	p->nchunks = cs->in.num_chunks;
-	p->chunks = kcalloc(p->nchunks, sizeof(struct amdgpu_cs_chunk),
+	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 			    GFP_KERNEL);
 	if (p->chunks == NULL) {
 		r = -ENOMEM;
@@ -173,7 +228,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		struct drm_amdgpu_cs_chunk user_chunk;
 		uint32_t __user *cdata;
 
-		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
+		chunk_ptr = (void __user *)chunk_array[i];
 		if (copy_from_user(&user_chunk, chunk_ptr,
 				       sizeof(struct drm_amdgpu_cs_chunk))) {
 			r = -EFAULT;
@@ -183,7 +238,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		p->chunks[i].length_dw = user_chunk.length_dw;
 
 		size = p->chunks[i].length_dw;
-		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+		cdata = (void __user *)user_chunk.chunk_data;
 		p->chunks[i].user_ptr = cdata;
 
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
@@ -235,11 +290,10 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		}
 	}
 
-	p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!p->ibs) {
+
+	p->ibs = kmalloc_array(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!p->ibs)
 		r = -ENOMEM;
-		goto out;
-	}
 
 out:
 	kfree(chunk_array);
@@ -415,18 +469,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
 }
 
-/**
- * cs_parser_fini() - clean parser states
- * @parser: parser structure holding parsing context.
- * @error: error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
 {
-	unsigned i;
-
 	if (!error) {
 		/* Sort the buffer list from the smallest to largest buffer,
 		 * which affects the order of buffers in the LRU list.
@@ -447,11 +491,19 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 	}
+}
 
+static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
+{
+	unsigned i;
 	if (parser->ctx)
 		amdgpu_ctx_put(parser->ctx);
-	if (parser->bo_list)
-		amdgpu_bo_list_put(parser->bo_list);
+	if (parser->bo_list) {
+		if (amdgpu_enable_scheduler && !parser->bo_list->has_userptr)
+			amdgpu_bo_list_free(parser->bo_list);
+		else
+			amdgpu_bo_list_put(parser->bo_list);
+	}
 	drm_free_large(parser->vm_bos);
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
@@ -462,6 +514,29 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 	kfree(parser->ibs);
 	if (parser->uf.bo)
 		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+
+	if (!amdgpu_enable_scheduler)
+		kfree(parser);
+}
+
+/**
+ * cs_parser_fini() - clean parser states
+ * @parser: parser structure holding parsing context.
+ * @error: error number
+ *
+ * If error is set than unvalidate buffer, otherwise just free memory
+ * used by parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+{
+	amdgpu_cs_parser_fini_early(parser, error, backoff);
+	amdgpu_cs_parser_fini_late(parser);
+}
+
+static int amdgpu_cs_parser_free_job(struct amdgpu_cs_parser *sched_job)
+{
+	amdgpu_cs_parser_fini_late(sched_job);
+	return 0;
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
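The fini_early/fini_late split above is what makes it safe to hand a parser to the scheduler: reservation backoff (fini_early) must still happen synchronously in the ioctl, while freeing the parser's memory (fini_late) is deferred to amdgpu_cs_parser_free_job, invoked from the job work handler once the job retires. Shape of the split as a compilable sketch, with stand-in types rather than the kernel structures:

    #include <stdbool.h>
    #include <stdlib.h>

    struct parser { int dummy; };

    static void fini_early(struct parser *p, int error, bool backoff)
    {
    	/* back off buffer reservations; on error also unvalidate */
    }

    static void fini_late(struct parser *p)
    {
    	/* drop ctx/bo_list/chunk/ib memory, then the parser itself */
    	free(p);
    }

    static int free_job(struct parser *sched_job)
    {
    	fini_late(sched_job);	/* scheduler calls this once the job retires */
    	return 0;
    }

    int main(void)
    {
    	struct parser *p = calloc(1, sizeof(*p));

    	fini_early(p, 0, true);	/* synchronous part, still in the ioctl */
    	return free_job(p);	/* deferred part, here invoked directly */
    }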
@@ -476,12 +551,18 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 	if (r)
 		return r;
 
+	r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
+	if (r)
+		return r;
+
 	r = amdgpu_vm_clear_freed(adev, vm);
 	if (r)
 		return r;
 
 	if (p->bo_list) {
 		for (i = 0; i < p->bo_list->num_entries; i++) {
+			struct fence *f;
+
 			/* ignore duplicates */
 			bo = p->bo_list->array[i].robj;
 			if (!bo)
@@ -495,7 +576,10 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 			if (r)
 				return r;
 
-			amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update);
+			f = bo_va->last_pt_update;
+			r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
+			if (r)
+				return r;
 		}
 	}
 
@@ -529,9 +613,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 		goto out;
 	}
 	amdgpu_cs_sync_rings(parser);
-
-	r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
-			       parser->filp);
+	if (!amdgpu_enable_scheduler)
+		r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
+				       parser->filp);
 
 out:
 	mutex_unlock(&vm->mutex);
@@ -650,7 +734,6 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 			ib->oa_size = amdgpu_bo_size(oa);
 		}
 	}
-
 	/* wrap the last IB with user fence */
 	if (parser->uf.bo) {
 		struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
@@ -693,9 +776,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 			sizeof(struct drm_amdgpu_cs_chunk_dep);
 
 		for (j = 0; j < num_deps; ++j) {
-			struct amdgpu_fence *fence;
 			struct amdgpu_ring *ring;
 			struct amdgpu_ctx *ctx;
+			struct fence *fence;
 
 			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
 					       deps[j].ip_instance,
@@ -707,50 +790,34 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 			if (ctx == NULL)
 				return -EINVAL;
 
-			r = amdgpu_fence_recreate(ring, p->filp,
-						  deps[j].handle,
-						  &fence);
-			if (r) {
+			fence = amdgpu_ctx_get_fence(ctx, ring,
+						     deps[j].handle);
+			if (IS_ERR(fence)) {
+				r = PTR_ERR(fence);
 				amdgpu_ctx_put(ctx);
 				return r;
-			}
 
-			amdgpu_sync_fence(&ib->sync, fence);
-			amdgpu_fence_unref(&fence);
-			amdgpu_ctx_put(ctx);
+			} else if (fence) {
+				r = amdgpu_sync_fence(adev, &ib->sync, fence);
+				fence_put(fence);
+				amdgpu_ctx_put(ctx);
+				if (r)
+					return r;
+			}
 		}
 	}
 
 	return 0;
 }
 
-int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+static int amdgpu_cs_parser_prepare_job(struct amdgpu_cs_parser *sched_job)
 {
-	struct amdgpu_device *adev = dev->dev_private;
-	union drm_amdgpu_cs *cs = data;
-	struct amdgpu_cs_parser parser;
 	int r, i;
+	struct amdgpu_cs_parser *parser = sched_job;
+	struct amdgpu_device *adev = sched_job->adev;
 	bool reserved_buffers = false;
 
-	down_read(&adev->exclusive_lock);
-	if (!adev->accel_working) {
-		up_read(&adev->exclusive_lock);
-		return -EBUSY;
-	}
-	/* initialize parser */
-	memset(&parser, 0, sizeof(struct amdgpu_cs_parser));
-	parser.filp = filp;
-	parser.adev = adev;
-	r = amdgpu_cs_parser_init(&parser, data);
-	if (r) {
-		DRM_ERROR("Failed to initialize parser !\n");
-		amdgpu_cs_parser_fini(&parser, r, false);
-		up_read(&adev->exclusive_lock);
-		r = amdgpu_cs_handle_lockup(adev, r);
-		return r;
-	}
-
-	r = amdgpu_cs_parser_relocs(&parser);
+	r = amdgpu_cs_parser_relocs(parser);
 	if (r) {
 		if (r != -ERESTARTSYS) {
 			if (r == -ENOMEM)
@@ -762,30 +829,114 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 	if (!r) {
 		reserved_buffers = true;
-		r = amdgpu_cs_ib_fill(adev, &parser);
+		r = amdgpu_cs_ib_fill(adev, parser);
+	}
+	if (!r) {
+		r = amdgpu_cs_dependencies(adev, parser);
+		if (r)
+			DRM_ERROR("Failed in the dependencies handling %d!\n", r);
 	}
+	if (r) {
+		amdgpu_cs_parser_fini(parser, r, reserved_buffers);
+		return r;
+	}
+
+	for (i = 0; i < parser->num_ibs; i++)
+		trace_amdgpu_cs(parser, i);
+
+	r = amdgpu_cs_ib_vm_chunk(adev, parser);
+	return r;
+}
+
+static struct amdgpu_ring *amdgpu_cs_parser_get_ring(
+	struct amdgpu_device *adev,
+	struct amdgpu_cs_parser *parser)
+{
+	int i, r;
+
+	struct amdgpu_cs_chunk *chunk;
+	struct drm_amdgpu_cs_chunk_ib *chunk_ib;
+	struct amdgpu_ring *ring;
+	for (i = 0; i < parser->nchunks; i++) {
+		chunk = &parser->chunks[i];
+		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
+
+		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
+			continue;
+
+		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
+				       chunk_ib->ip_instance, chunk_ib->ring,
+				       &ring);
+		if (r)
+			return NULL;
+		break;
+	}
+	return ring;
+}
+
+int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	union drm_amdgpu_cs *cs = data;
+	struct amdgpu_cs_parser *parser;
+	int r;
 
-	if (!r)
-		r = amdgpu_cs_dependencies(adev, &parser);
+	down_read(&adev->exclusive_lock);
+	if (!adev->accel_working) {
+		up_read(&adev->exclusive_lock);
+		return -EBUSY;
+	}
 
+	parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
+	if (!parser)
+		return -ENOMEM;
+	r = amdgpu_cs_parser_init(parser, data);
 	if (r) {
-		amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
+		DRM_ERROR("Failed to initialize parser !\n");
+		amdgpu_cs_parser_fini(parser, r, false);
 		up_read(&adev->exclusive_lock);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
 	}
 
-	for (i = 0; i < parser.num_ibs; i++)
-		trace_amdgpu_cs(&parser, i);
+	if (amdgpu_enable_scheduler && parser->num_ibs) {
+		struct amdgpu_ring * ring =
+			amdgpu_cs_parser_get_ring(adev, parser);
+		r = amdgpu_cs_parser_prepare_job(parser);
+		if (r)
+			goto out;
+		parser->ring = ring;
+		parser->free_job = amdgpu_cs_parser_free_job;
+		mutex_lock(&parser->job_lock);
+		r = amd_sched_push_job(ring->scheduler,
+				       &parser->ctx->rings[ring->idx].entity,
+				       parser,
+				       &parser->s_fence);
+		if (r) {
+			mutex_unlock(&parser->job_lock);
+			goto out;
+		}
+		parser->ibs[parser->num_ibs - 1].sequence =
+			amdgpu_ctx_add_fence(parser->ctx, ring,
+					     &parser->s_fence->base,
+					     parser->s_fence->v_seq);
+		cs->out.handle = parser->s_fence->v_seq;
+		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
+		ttm_eu_fence_buffer_objects(&parser->ticket,
+					    &parser->validated,
+					    &parser->s_fence->base);
 
-	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
-	if (r) {
-		goto out;
-	}
+		mutex_unlock(&parser->job_lock);
+		up_read(&adev->exclusive_lock);
+		return 0;
+	}
+	r = amdgpu_cs_parser_prepare_job(parser);
+	if (r)
+		goto out;
 
-	cs->out.handle = parser.ibs[parser.num_ibs - 1].fence->seq;
+	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
-	amdgpu_cs_parser_fini(&parser, r, true);
+	amdgpu_cs_parser_fini(parser, r, true);
 	up_read(&adev->exclusive_lock);
 	r = amdgpu_cs_handle_lockup(adev, r);
 	return r;
@@ -806,30 +957,29 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 	union drm_amdgpu_wait_cs *wait = data;
 	struct amdgpu_device *adev = dev->dev_private;
 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
-	struct amdgpu_fence *fence = NULL;
 	struct amdgpu_ring *ring = NULL;
 	struct amdgpu_ctx *ctx;
+	struct fence *fence;
 	long r;
 
-	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
-	if (ctx == NULL)
-		return -EINVAL;
-
 	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
 			       wait->in.ring, &ring);
-	if (r) {
-		amdgpu_ctx_put(ctx);
+	if (r)
 		return r;
-	}
 
-	r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
-	if (r) {
-		amdgpu_ctx_put(ctx);
-		return r;
-	}
+	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
+	if (ctx == NULL)
+		return -EINVAL;
+
+	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+	if (IS_ERR(fence))
+		r = PTR_ERR(fence);
+	else if (fence) {
+		r = fence_wait_timeout(fence, true, timeout);
+		fence_put(fence);
+	} else
+		r = 1;
 
-	r = fence_wait_timeout(&fence->base, true, timeout);
-	amdgpu_fence_unref(&fence);
 	amdgpu_ctx_put(ctx);
 	if (r < 0)
 		return r;
@@ -864,7 +1014,16 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 		if (!reloc->bo_va)
 			continue;
 
-		list_for_each_entry(mapping, &reloc->bo_va->mappings, list) {
+		list_for_each_entry(mapping, &reloc->bo_va->valids, list) {
+			if (mapping->it.start > addr ||
+			    addr > mapping->it.last)
+				continue;
+
+			*bo = reloc->bo_va->bo;
+			return mapping;
+		}
+
+		list_for_each_entry(mapping, &reloc->bo_va->invalids, list) {
 			if (mapping->it.start > addr ||
 			    addr > mapping->it.last)
 				continue;
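Worth noting in the reworked amdgpu_cs_ioctl above: with the scheduler enabled, the handle returned to userspace is no longer a hardware fence sequence but the entity's queued sequence number (s_fence->v_seq), recorded via amdgpu_ctx_add_fence and resolved later by amdgpu_ctx_get_fence in the wait path. A toy sketch of the "handle is just a monotonic per-ring sequence" idea, with stand-in types:

    #include <stdint.h>
    #include <stdio.h>

    struct submission {
    	uint64_t seq;		/* value handed back in cs->out.handle */
    };

    static uint64_t submit(uint64_t *next_seq)
    {
    	return (*next_seq)++;	/* handle is the queued position */
    }

    int main(void)
    {
    	uint64_t next = 1;	/* sequences start at 1 in the new code */
    	struct submission a = { submit(&next) }, b = { submit(&next) };

    	printf("handles: %llu %llu\n",
    	       (unsigned long long)a.seq, (unsigned long long)b.seq);
    	return 0;
    }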
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6c66ac8a1891..08bc7722ddb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -25,54 +25,107 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static void amdgpu_ctx_do_release(struct kref *ref)
+int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+		    struct amdgpu_ctx *ctx)
 {
-	struct amdgpu_ctx *ctx;
-	struct amdgpu_ctx_mgr *mgr;
+	unsigned i, j;
+	int r;
 
-	ctx = container_of(ref, struct amdgpu_ctx, refcount);
-	mgr = &ctx->fpriv->ctx_mgr;
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->adev = adev;
+	kref_init(&ctx->refcount);
+	spin_lock_init(&ctx->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		ctx->rings[i].sequence = 1;
 
-	idr_remove(&mgr->ctx_handles, ctx->id);
-	kfree(ctx);
+	if (amdgpu_enable_scheduler) {
+		/* create context entity for each ring */
+		for (i = 0; i < adev->num_rings; i++) {
+			struct amd_sched_rq *rq;
+			if (kernel)
+				rq = &adev->rings[i]->scheduler->kernel_rq;
+			else
+				rq = &adev->rings[i]->scheduler->sched_rq;
+			r = amd_sched_entity_init(adev->rings[i]->scheduler,
+						  &ctx->rings[i].entity,
+						  rq, amdgpu_sched_jobs);
+			if (r)
+				break;
+		}
+
+		if (i < adev->num_rings) {
+			for (j = 0; j < i; j++)
+				amd_sched_entity_fini(adev->rings[j]->scheduler,
+						      &ctx->rings[j].entity);
+			kfree(ctx);
+			return r;
+		}
+	}
+	return 0;
 }
 
-int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t *id, uint32_t flags)
+void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+{
+	struct amdgpu_device *adev = ctx->adev;
+	unsigned i, j;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+			fence_put(ctx->rings[i].fences[j]);
+
+	if (amdgpu_enable_scheduler) {
+		for (i = 0; i < adev->num_rings; i++)
+			amd_sched_entity_fini(adev->rings[i]->scheduler,
+					      &ctx->rings[i].entity);
+	}
+}
+
+static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
+			    struct amdgpu_fpriv *fpriv,
+			    uint32_t *id)
 {
-	int r;
-	struct amdgpu_ctx *ctx;
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx *ctx;
+	int r;
 
 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
 
 	mutex_lock(&mgr->lock);
-	r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL);
+	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
 	if (r < 0) {
 		mutex_unlock(&mgr->lock);
 		kfree(ctx);
 		return r;
 	}
 	*id = (uint32_t)r;
-
-	memset(ctx, 0, sizeof(*ctx));
-	ctx->id = *id;
-	ctx->fpriv = fpriv;
-	kref_init(&ctx->refcount);
+	r = amdgpu_ctx_init(adev, false, ctx);
 	mutex_unlock(&mgr->lock);
 
-	return 0;
+	return r;
 }
 
-int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
+static void amdgpu_ctx_do_release(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx;
+
+	ctx = container_of(ref, struct amdgpu_ctx, refcount);
+
+	amdgpu_ctx_fini(ctx);
+
+	kfree(ctx);
+}
+
+static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
+{
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx *ctx;
 
 	mutex_lock(&mgr->lock);
 	ctx = idr_find(&mgr->ctx_handles, id);
 	if (ctx) {
+		idr_remove(&mgr->ctx_handles, id);
 		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
 		mutex_unlock(&mgr->lock);
 		return 0;
@@ -86,9 +139,13 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
 			    union drm_amdgpu_ctx_out *out)
 {
 	struct amdgpu_ctx *ctx;
-	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx_mgr *mgr;
 	unsigned reset_counter;
 
+	if (!fpriv)
+		return -EINVAL;
+
+	mgr = &fpriv->ctx_mgr;
 	mutex_lock(&mgr->lock);
 	ctx = idr_find(&mgr->ctx_handles, id);
 	if (!ctx) {
@@ -97,8 +154,8 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
 	}
 
 	/* TODO: these two are always zero */
-	out->state.flags = ctx->state.flags;
-	out->state.hangs = ctx->state.hangs;
+	out->state.flags = 0x0;
+	out->state.hangs = 0x0;
 
 	/* determine if a GPU reset has occured since the last call */
 	reset_counter = atomic_read(&adev->gpu_reset_counter);
@@ -113,28 +170,11 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
 	return 0;
 }
 
-void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv)
-{
-	struct idr *idp;
-	struct amdgpu_ctx *ctx;
-	uint32_t id;
-	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
-	idp = &mgr->ctx_handles;
-
-	idr_for_each_entry(idp,ctx,id) {
-		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
-			DRM_ERROR("ctx (id=%ul) is still alive\n",ctx->id);
-	}
-
-	mutex_destroy(&mgr->lock);
-}
-
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp)
 {
 	int r;
 	uint32_t id;
-	uint32_t flags;
 
 	union drm_amdgpu_ctx *args = data;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -142,15 +182,14 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 
 	r = 0;
 	id = args->in.ctx_id;
-	flags = args->in.flags;
 
 	switch (args->in.op) {
 	case AMDGPU_CTX_OP_ALLOC_CTX:
-		r = amdgpu_ctx_alloc(adev, fpriv, &id, flags);
+		r = amdgpu_ctx_alloc(adev, fpriv, &id);
 		args->out.alloc.ctx_id = id;
 		break;
 	case AMDGPU_CTX_OP_FREE_CTX:
-		r = amdgpu_ctx_free(adev, fpriv, id);
+		r = amdgpu_ctx_free(fpriv, id);
 		break;
 	case AMDGPU_CTX_OP_QUERY_STATE:
 		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
@@ -165,7 +204,12 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
 {
 	struct amdgpu_ctx *ctx;
-	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx_mgr *mgr;
+
+	if (!fpriv)
+		return NULL;
+
+	mgr = &fpriv->ctx_mgr;
 
 	mutex_lock(&mgr->lock);
 	ctx = idr_find(&mgr->ctx_handles, id);
@@ -177,17 +221,96 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
 
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 {
-	struct amdgpu_fpriv *fpriv;
-	struct amdgpu_ctx_mgr *mgr;
-
 	if (ctx == NULL)
 		return -EINVAL;
 
-	fpriv = ctx->fpriv;
-	mgr = &fpriv->ctx_mgr;
-	mutex_lock(&mgr->lock);
 	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
-	mutex_unlock(&mgr->lock);
-
 	return 0;
 }
+
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+			      struct fence *fence, uint64_t queued_seq)
+{
+	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
+	uint64_t seq = 0;
+	unsigned idx = 0;
+	struct fence *other = NULL;
+
+	if (amdgpu_enable_scheduler)
+		seq = queued_seq;
+	else
+		seq = cring->sequence;
+	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+	other = cring->fences[idx];
+	if (other) {
+		signed long r;
+		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		if (r < 0)
+			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+	}
+
+	fence_get(fence);
+
+	spin_lock(&ctx->ring_lock);
+	cring->fences[idx] = fence;
+	if (!amdgpu_enable_scheduler)
+		cring->sequence++;
+	spin_unlock(&ctx->ring_lock);
+
+	fence_put(other);
+
+	return seq;
+}
+
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+				   struct amdgpu_ring *ring, uint64_t seq)
+{
+	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
+	struct fence *fence;
+	uint64_t queued_seq;
+
+	spin_lock(&ctx->ring_lock);
+	if (amdgpu_enable_scheduler)
+		queued_seq = amd_sched_next_queued_seq(&cring->entity);
+	else
+		queued_seq = cring->sequence;
+
+	if (seq >= queued_seq) {
+		spin_unlock(&ctx->ring_lock);
+		return ERR_PTR(-EINVAL);
+	}
+
+
+	if (seq + AMDGPU_CTX_MAX_CS_PENDING < queued_seq) {
+		spin_unlock(&ctx->ring_lock);
+		return NULL;
+	}
+
+	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
+	spin_unlock(&ctx->ring_lock);
+
+	return fence;
+}
+
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
+{
+	mutex_init(&mgr->lock);
+	idr_init(&mgr->ctx_handles);
+}
+
+void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
+{
+	struct amdgpu_ctx *ctx;
+	struct idr *idp;
+	uint32_t id;
+
+	idp = &mgr->ctx_handles;
+
+	idr_for_each_entry(idp, ctx, id) {
+		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
+			DRM_ERROR("ctx %p is still alive\n", ctx);
+	}
+
+	idr_destroy(&mgr->ctx_handles);
+	mutex_destroy(&mgr->lock);
+}
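The context code above keeps the last AMDGPU_CTX_MAX_CS_PENDING fences per ring in a small modular window: amdgpu_ctx_add_fence first waits out the slot's previous occupant (N submissions old) before overwriting it, and amdgpu_ctx_get_fence rejects sequences that are still queued or already recycled. A compact sketch of the lookup rule, with stand-in types (the real code returns ERR_PTR(-EINVAL) for not-yet-submitted sequences rather than NULL):

    #include <stddef.h>
    #include <stdint.h>

    #define MAX_CS_PENDING 16

    struct fence;	/* opaque here */

    struct ctx_ring {
    	uint64_t sequence;			/* next seq to hand out */
    	struct fence *fences[MAX_CS_PENDING];
    };

    static struct fence *ring_lookup(struct ctx_ring *cr, uint64_t seq)
    {
    	if (seq >= cr->sequence)		/* not submitted yet */
    		return NULL;
    	if (seq + MAX_CS_PENDING < cr->sequence)
    		return NULL;			/* too old, slot recycled */
    	return cr->fences[seq % MAX_CS_PENDING];
    }

    int main(void)
    {
    	struct ctx_ring cr = { .sequence = 5 };

    	/* seq 9 is unsubmitted, seq 4 maps into the live window */
    	return (ring_lookup(&cr, 9) == NULL &&
    		ring_lookup(&cr, 4) == cr.fences[4]) ? 0 : 1;
    }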
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 99f158e1baff..42d1a22c1199 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -55,6 +55,7 @@ static const char *amdgpu_asic_name[] = {
 	"MULLINS",
 	"TOPAZ",
 	"TONGA",
+	"FIJI",
 	"CARRIZO",
 	"LAST",
 };
@@ -63,7 +64,7 @@ bool amdgpu_device_is_px(struct drm_device *dev)
 {
 	struct amdgpu_device *adev = dev->dev_private;
 
-	if (adev->flags & AMDGPU_IS_PX)
+	if (adev->flags & AMD_IS_PX)
 		return true;
 	return false;
 }
@@ -1160,6 +1161,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 	switch (adev->asic_type) {
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
+	case CHIP_FIJI:
 	case CHIP_CARRIZO:
 		if (adev->asic_type == CHIP_CARRIZO)
 			adev->family = AMDGPU_FAMILY_CZ;
@@ -1377,7 +1379,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1377 adev->ddev = ddev; 1379 adev->ddev = ddev;
1378 adev->pdev = pdev; 1380 adev->pdev = pdev;
1379 adev->flags = flags; 1381 adev->flags = flags;
1380 adev->asic_type = flags & AMDGPU_ASIC_MASK; 1382 adev->asic_type = flags & AMD_ASIC_MASK;
1381 adev->is_atom_bios = false; 1383 adev->is_atom_bios = false;
1382 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; 1384 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1383 adev->mc.gtt_size = 512 * 1024 * 1024; 1385 adev->mc.gtt_size = 512 * 1024 * 1024;
@@ -1523,6 +1525,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1523 return r; 1525 return r;
1524 } 1526 }
1525 1527
1528 r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx);
1529 if (r) {
1530 dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
1531 return r;
1532 }
1526 r = amdgpu_ib_ring_tests(adev); 1533 r = amdgpu_ib_ring_tests(adev);
1527 if (r) 1534 if (r)
1528 DRM_ERROR("ib ring test failed (%d).\n", r); 1535 DRM_ERROR("ib ring test failed (%d).\n", r);
@@ -1584,6 +1591,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
1584 adev->shutdown = true; 1591 adev->shutdown = true;
1585 /* evict vram memory */ 1592 /* evict vram memory */
1586 amdgpu_bo_evict_vram(adev); 1593 amdgpu_bo_evict_vram(adev);
1594 amdgpu_ctx_fini(&adev->kernel_ctx);
1587 amdgpu_ib_pool_fini(adev); 1595 amdgpu_ib_pool_fini(adev);
1588 amdgpu_fence_driver_fini(adev); 1596 amdgpu_fence_driver_fini(adev);
1589 amdgpu_fbdev_fini(adev); 1597 amdgpu_fbdev_fini(adev);
@@ -1627,8 +1635,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
1627 struct amdgpu_device *adev; 1635 struct amdgpu_device *adev;
1628 struct drm_crtc *crtc; 1636 struct drm_crtc *crtc;
1629 struct drm_connector *connector; 1637 struct drm_connector *connector;
1630 int i, r; 1638 int r;
1631 bool force_completion = false;
1632 1639
1633 if (dev == NULL || dev->dev_private == NULL) { 1640 if (dev == NULL || dev->dev_private == NULL) {
1634 return -ENODEV; 1641 return -ENODEV;
@@ -1667,21 +1674,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
1667 /* evict vram memory */ 1674 /* evict vram memory */
1668 amdgpu_bo_evict_vram(adev); 1675 amdgpu_bo_evict_vram(adev);
1669 1676
1670 /* wait for gpu to finish processing current batch */ 1677 amdgpu_fence_driver_suspend(adev);
1671 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1672 struct amdgpu_ring *ring = adev->rings[i];
1673 if (!ring)
1674 continue;
1675
1676 r = amdgpu_fence_wait_empty(ring);
1677 if (r) {
1678 /* delay GPU reset to resume */
1679 force_completion = true;
1680 }
1681 }
1682 if (force_completion) {
1683 amdgpu_fence_driver_force_completion(adev);
1684 }
1685 1678
1686 r = amdgpu_suspend(adev); 1679 r = amdgpu_suspend(adev);
1687 1680
@@ -1739,6 +1732,8 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1739 1732
1740 r = amdgpu_resume(adev); 1733 r = amdgpu_resume(adev);
1741 1734
1735 amdgpu_fence_driver_resume(adev);
1736
1742 r = amdgpu_ib_ring_tests(adev); 1737 r = amdgpu_ib_ring_tests(adev);
1743 if (r) 1738 if (r)
1744 DRM_ERROR("ib ring test failed (%d).\n", r); 1739 DRM_ERROR("ib ring test failed (%d).\n", r);
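
Two threads run through the amdgpu_device.c hunks: device init now creates a
global kernel context (released again in amdgpu_device_fini()), and the
open-coded "wait for every ring, force completion on failure" loop in the
suspend path collapses into amdgpu_fence_driver_suspend(), paired with a new
amdgpu_fence_driver_resume() call; both bodies are added to amdgpu_fence.c
further down in this patch. A condensed sketch of the resulting ordering,
using only the function names introduced here:

	/* Suspend: idle the rings and mask fence interrupts before the IP
	 * blocks power down; resume re-enables the interrupts afterwards. */
	amdgpu_bo_evict_vram(adev);
	amdgpu_fence_driver_suspend(adev);
	r = amdgpu_suspend(adev);
	/* ... later, on the resume side ... */
	r = amdgpu_resume(adev);
	amdgpu_fence_driver_resume(adev);
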
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b16b9256883e..e3d70772b531 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,6 +35,36 @@
35#include <drm/drm_crtc_helper.h> 35#include <drm/drm_crtc_helper.h>
36#include <drm/drm_edid.h> 36#include <drm/drm_edid.h>
37 37
38static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
39 struct fence **f)
40{
41 struct amdgpu_fence *fence;
42 long r;
43
44 if (*f == NULL)
45 return;
46
47 fence = to_amdgpu_fence(*f);
48 if (fence) {
49 r = fence_wait(&fence->base, false);
50 if (r == -EDEADLK) {
51 up_read(&adev->exclusive_lock);
52 r = amdgpu_gpu_reset(adev);
53 down_read(&adev->exclusive_lock);
54 }
55 } else
56 r = fence_wait(*f, false);
57
58 if (r)
59 DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r);
60
61 /* We continue with the page flip even if we failed to wait on
62 * the fence, otherwise the DRM core and userspace will be
63 * confused about which BO the CRTC is scanning out
64 */
65 fence_put(*f);
66 *f = NULL;
67}
38 68
39static void amdgpu_flip_work_func(struct work_struct *__work) 69static void amdgpu_flip_work_func(struct work_struct *__work)
40{ 70{
@@ -44,34 +74,13 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
44 struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id]; 74 struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
45 75
46 struct drm_crtc *crtc = &amdgpuCrtc->base; 76 struct drm_crtc *crtc = &amdgpuCrtc->base;
47 struct amdgpu_fence *fence;
48 unsigned long flags; 77 unsigned long flags;
49 int r; 78 unsigned i;
50 79
51 down_read(&adev->exclusive_lock); 80 down_read(&adev->exclusive_lock);
52 if (work->fence) { 81 amdgpu_flip_wait_fence(adev, &work->excl);
53 fence = to_amdgpu_fence(work->fence); 82 for (i = 0; i < work->shared_count; ++i)
54 if (fence) { 83 amdgpu_flip_wait_fence(adev, &work->shared[i]);
55 r = amdgpu_fence_wait(fence, false);
56 if (r == -EDEADLK) {
57 up_read(&adev->exclusive_lock);
58 r = amdgpu_gpu_reset(adev);
59 down_read(&adev->exclusive_lock);
60 }
61 } else
62 r = fence_wait(work->fence, false);
63
64 if (r)
65 DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
66
67 /* We continue with the page flip even if we failed to wait on
68 * the fence, otherwise the DRM core and userspace will be
69 * confused about which BO the CRTC is scanning out
70 */
71
72 fence_put(work->fence);
73 work->fence = NULL;
74 }
75 84
76 /* We borrow the event spin lock for protecting flip_status */ 85 /* We borrow the event spin lock for protecting flip_status */
77 spin_lock_irqsave(&crtc->dev->event_lock, flags); 86 spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -108,6 +117,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
108 DRM_ERROR("failed to reserve buffer after flip\n"); 117 DRM_ERROR("failed to reserve buffer after flip\n");
109 118
110 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); 119 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
120 kfree(work->shared);
111 kfree(work); 121 kfree(work);
112} 122}
113 123
@@ -127,7 +137,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
127 unsigned long flags; 137 unsigned long flags;
128 u64 tiling_flags; 138 u64 tiling_flags;
129 u64 base; 139 u64 base;
130 int r; 140 int i, r;
131 141
132 work = kzalloc(sizeof *work, GFP_KERNEL); 142 work = kzalloc(sizeof *work, GFP_KERNEL);
133 if (work == NULL) 143 if (work == NULL)
@@ -167,7 +177,19 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
167 goto cleanup; 177 goto cleanup;
168 } 178 }
169 179
170 work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); 180 r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
181 &work->shared_count,
182 &work->shared);
183 if (unlikely(r != 0)) {
184 amdgpu_bo_unreserve(new_rbo);
185 DRM_ERROR("failed to get fences for buffer\n");
186 goto cleanup;
187 }
188
189 fence_get(work->excl);
190 for (i = 0; i < work->shared_count; ++i)
191 fence_get(work->shared[i]);
192
171 amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags); 193 amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
172 amdgpu_bo_unreserve(new_rbo); 194 amdgpu_bo_unreserve(new_rbo);
173 195
@@ -212,7 +234,10 @@ pflip_cleanup:
212 234
213cleanup: 235cleanup:
214 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); 236 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
215 fence_put(work->fence); 237 fence_put(work->excl);
238 for (i = 0; i < work->shared_count; ++i)
239 fence_put(work->shared[i]);
240 kfree(work->shared);
216 kfree(work); 241 kfree(work);
217 242
218 return r; 243 return r;
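
The display change closes a correctness gap: a page flip used to wait only on
the exclusive fence of the new buffer, so pending shared (read) accesses, for
example from another engine still sampling the BO, were ignored.
reservation_object_get_fences_rcu() snapshots the exclusive fence plus all
shared fences, the extra fence_get() calls keep them alive past the
unreserve, and the flip worker drains each one; a condensed sketch of the
worker side:

	/* The flip worker now drains the whole snapshot, not just the
	 * exclusive fence; the unpin worker later kfree()s the array. */
	amdgpu_flip_wait_fence(adev, &work->excl);
	for (i = 0; i < work->shared_count; ++i)
		amdgpu_flip_wait_fence(adev, &work->shared[i]);

The helper itself special-cases native fences: only a to_amdgpu_fence() hit
can report -EDEADLK and trigger a GPU reset, while a foreign fence from
another driver gets a plain fence_wait().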
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 115906f5fda0..e6fa27805207 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -63,7 +63,7 @@ int amdgpu_disp_priority = 0;
63int amdgpu_hw_i2c = 0; 63int amdgpu_hw_i2c = 0;
64int amdgpu_pcie_gen2 = -1; 64int amdgpu_pcie_gen2 = -1;
65int amdgpu_msi = -1; 65int amdgpu_msi = -1;
66int amdgpu_lockup_timeout = 10000; 66int amdgpu_lockup_timeout = 0;
67int amdgpu_dpm = -1; 67int amdgpu_dpm = -1;
68int amdgpu_smc_load_fw = 1; 68int amdgpu_smc_load_fw = 1;
69int amdgpu_aspm = -1; 69int amdgpu_aspm = -1;
@@ -75,6 +75,9 @@ int amdgpu_deep_color = 0;
75int amdgpu_vm_size = 8; 75int amdgpu_vm_size = 8;
76int amdgpu_vm_block_size = -1; 76int amdgpu_vm_block_size = -1;
77int amdgpu_exp_hw_support = 0; 77int amdgpu_exp_hw_support = 0;
78int amdgpu_enable_scheduler = 0;
79int amdgpu_sched_jobs = 16;
80int amdgpu_sched_hw_submission = 2;
78 81
79MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); 82MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
80module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); 83module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -103,7 +106,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
103MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)"); 106MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
104module_param_named(msi, amdgpu_msi, int, 0444); 107module_param_named(msi, amdgpu_msi, int, 0444);
105 108
106MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (defaul 10000 = 10 seconds, 0 = disable)"); 109MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = disable)");
107module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444); 110module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
108 111
109MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)"); 112MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
@@ -139,36 +142,45 @@ module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
139MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); 142MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
140module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444); 143module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
141 144
145MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable, 0 = disable (default))");
146module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
147
148MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)");
149module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
150
151MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
152module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
153
142static struct pci_device_id pciidlist[] = { 154static struct pci_device_id pciidlist[] = {
143#ifdef CONFIG_DRM_AMDGPU_CIK 155#ifdef CONFIG_DRM_AMDGPU_CIK
144 /* Kaveri */ 156 /* Kaveri */
145 {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 157 {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
146 {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, 158 {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
147 {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 159 {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
148 {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, 160 {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
149 {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 161 {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
150 {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 162 {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
151 {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 163 {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
152 {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 164 {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
153 {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 165 {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
154 {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 166 {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
155 {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, 167 {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
156 {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, 168 {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
157 {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, 169 {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
158 {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, 170 {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
159 {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, 171 {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
160 {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, 172 {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
161 {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, 173 {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
162 {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 174 {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
163 {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 175 {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
164 {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, 176 {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
165 {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, 177 {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
166 {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, 178 {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
167 /* Bonaire */ 179 /* Bonaire */
168 {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY}, 180 {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
169 {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY}, 181 {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
170 {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY}, 182 {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
171 {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY}, 183 {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
172 {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, 184 {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
173 {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, 185 {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
174 {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, 186 {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
@@ -190,39 +202,39 @@ static struct pci_device_id pciidlist[] = {
190 {0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, 202 {0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
191 {0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, 203 {0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
192 /* Kabini */ 204 /* Kabini */
193 {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 205 {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
194 {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, 206 {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
195 {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 207 {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
196 {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, 208 {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
197 {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 209 {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
198 {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, 210 {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
199 {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 211 {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
200 {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, 212 {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
201 {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 213 {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
202 {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 214 {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
203 {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, 215 {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
204 {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 216 {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
205 {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, 217 {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
206 {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, 218 {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
207 {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, 219 {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
208 {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, 220 {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
209 /* mullins */ 221 /* mullins */
210 {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 222 {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
211 {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 223 {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
212 {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 224 {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
213 {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 225 {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
214 {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 226 {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
215 {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 227 {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
216 {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 228 {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
217 {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 229 {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
218 {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 230 {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
219 {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 231 {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
220 {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 232 {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
221 {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 233 {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
222 {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 234 {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
223 {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 235 {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
224 {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 236 {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
225 {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, 237 {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
226#endif 238#endif
227 /* topaz */ 239 /* topaz */
228 {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, 240 {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
@@ -240,12 +252,14 @@ static struct pci_device_id pciidlist[] = {
240 {0x1002, 0x6930, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 252 {0x1002, 0x6930, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
241 {0x1002, 0x6938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 253 {0x1002, 0x6938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
242 {0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 254 {0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
255 /* fiji */
256 {0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
243 /* carrizo */ 257 /* carrizo */
244 {0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU}, 258 {0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
245 {0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU}, 259 {0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
246 {0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU}, 260 {0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
247 {0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU}, 261 {0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
248 {0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU}, 262 {0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
249 263
250 {0, 0, 0} 264 {0, 0, 0}
251}; 265};
@@ -281,7 +295,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
281 unsigned long flags = ent->driver_data; 295 unsigned long flags = ent->driver_data;
282 int ret; 296 int ret;
283 297
284 if ((flags & AMDGPU_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) { 298 if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
285 DRM_INFO("This hardware requires experimental hardware support.\n" 299 DRM_INFO("This hardware requires experimental hardware support.\n"
286 "See modparam exp_hw_support\n"); 300 "See modparam exp_hw_support\n");
287 return -ENODEV; 301 return -ENODEV;
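
Besides the mechanical AMDGPU_IS_*/AMD_IS_* flag rename and the new Fiji PCI
ID, this file grows three scheduler knobs and quietly changes a default: the
GPU lockup watchdog now ships disabled (lockup_timeout=0) instead of 10
seconds. A hedged usage sketch (parameter names from the hunk above; the
values are examples only, not recommendations):

	/* Loading the module with the software scheduler enabled:
	 *   modprobe amdgpu enable_scheduler=1 sched_jobs=32 sched_hw_submission=2
	 * In-driver the toggle is an ordinary integer read wherever the
	 * submission path forks, as in amdgpu_ctx_add_fence() above: */
	if (amdgpu_enable_scheduler)
		seq = queued_seq;	/* sequence comes from the scheduler */
	else
		seq = cring->sequence;	/* context-local counter otherwise */
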
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
index cceeb33c447a..e3a4f7048042 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
@@ -31,7 +31,7 @@
31#include <linux/firmware.h> 31#include <linux/firmware.h>
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33 33
34#include "amdgpu_family.h" 34#include "amd_shared.h"
35 35
36/* General customization: 36/* General customization:
37 */ 37 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index c1645d21f8e2..81b821247dde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -53,9 +53,9 @@ static struct fb_ops amdgpufb_ops = {
53 .owner = THIS_MODULE, 53 .owner = THIS_MODULE,
54 .fb_check_var = drm_fb_helper_check_var, 54 .fb_check_var = drm_fb_helper_check_var,
55 .fb_set_par = drm_fb_helper_set_par, 55 .fb_set_par = drm_fb_helper_set_par,
56 .fb_fillrect = cfb_fillrect, 56 .fb_fillrect = drm_fb_helper_cfb_fillrect,
57 .fb_copyarea = cfb_copyarea, 57 .fb_copyarea = drm_fb_helper_cfb_copyarea,
58 .fb_imageblit = cfb_imageblit, 58 .fb_imageblit = drm_fb_helper_cfb_imageblit,
59 .fb_pan_display = drm_fb_helper_pan_display, 59 .fb_pan_display = drm_fb_helper_pan_display,
60 .fb_blank = drm_fb_helper_blank, 60 .fb_blank = drm_fb_helper_blank,
61 .fb_setcmap = drm_fb_helper_setcmap, 61 .fb_setcmap = drm_fb_helper_setcmap,
@@ -179,7 +179,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
179 struct drm_mode_fb_cmd2 mode_cmd; 179 struct drm_mode_fb_cmd2 mode_cmd;
180 struct drm_gem_object *gobj = NULL; 180 struct drm_gem_object *gobj = NULL;
181 struct amdgpu_bo *rbo = NULL; 181 struct amdgpu_bo *rbo = NULL;
182 struct device *device = &adev->pdev->dev;
183 int ret; 182 int ret;
184 unsigned long tmp; 183 unsigned long tmp;
185 184
@@ -201,9 +200,9 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
201 rbo = gem_to_amdgpu_bo(gobj); 200 rbo = gem_to_amdgpu_bo(gobj);
202 201
203 /* okay we have an object now allocate the framebuffer */ 202 /* okay we have an object now allocate the framebuffer */
204 info = framebuffer_alloc(0, device); 203 info = drm_fb_helper_alloc_fbi(helper);
205 if (info == NULL) { 204 if (IS_ERR(info)) {
206 ret = -ENOMEM; 205 ret = PTR_ERR(info);
207 goto out_unref; 206 goto out_unref;
208 } 207 }
209 208
@@ -212,14 +211,13 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
212 ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj); 211 ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
213 if (ret) { 212 if (ret) {
214 DRM_ERROR("failed to initialize framebuffer %d\n", ret); 213 DRM_ERROR("failed to initialize framebuffer %d\n", ret);
215 goto out_unref; 214 goto out_destroy_fbi;
216 } 215 }
217 216
218 fb = &rfbdev->rfb.base; 217 fb = &rfbdev->rfb.base;
219 218
220 /* setup helper */ 219 /* setup helper */
221 rfbdev->helper.fb = fb; 220 rfbdev->helper.fb = fb;
222 rfbdev->helper.fbdev = info;
223 221
224 memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo)); 222 memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
225 223
@@ -239,11 +237,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
239 drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); 237 drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
240 238
241 /* setup aperture base/size for vesafb takeover */ 239 /* setup aperture base/size for vesafb takeover */
242 info->apertures = alloc_apertures(1);
243 if (!info->apertures) {
244 ret = -ENOMEM;
245 goto out_unref;
246 }
247 info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base; 240 info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
248 info->apertures->ranges[0].size = adev->mc.aper_size; 241 info->apertures->ranges[0].size = adev->mc.aper_size;
249 242
@@ -251,13 +244,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
251 244
252 if (info->screen_base == NULL) { 245 if (info->screen_base == NULL) {
253 ret = -ENOSPC; 246 ret = -ENOSPC;
254 goto out_unref; 247 goto out_destroy_fbi;
255 }
256
257 ret = fb_alloc_cmap(&info->cmap, 256, 0);
258 if (ret) {
259 ret = -ENOMEM;
260 goto out_unref;
261 } 248 }
262 249
263 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); 250 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
@@ -269,6 +256,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
269 vga_switcheroo_client_fb_set(adev->ddev->pdev, info); 256 vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
270 return 0; 257 return 0;
271 258
259out_destroy_fbi:
260 drm_fb_helper_release_fbi(helper);
272out_unref: 261out_unref:
273 if (rbo) { 262 if (rbo) {
274 263
@@ -290,17 +279,10 @@ void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev)
290 279
291static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev) 280static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
292{ 281{
293 struct fb_info *info;
294 struct amdgpu_framebuffer *rfb = &rfbdev->rfb; 282 struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
295 283
296 if (rfbdev->helper.fbdev) { 284 drm_fb_helper_unregister_fbi(&rfbdev->helper);
297 info = rfbdev->helper.fbdev; 285 drm_fb_helper_release_fbi(&rfbdev->helper);
298
299 unregister_framebuffer(info);
300 if (info->cmap.len)
301 fb_dealloc_cmap(&info->cmap);
302 framebuffer_release(info);
303 }
304 286
305 if (rfb->obj) { 287 if (rfb->obj) {
306 amdgpufb_destroy_pinned_object(rfb->obj); 288 amdgpufb_destroy_pinned_object(rfb->obj);
@@ -395,7 +377,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
395void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state) 377void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
396{ 378{
397 if (adev->mode_info.rfbdev) 379 if (adev->mode_info.rfbdev)
398 fb_set_suspend(adev->mode_info.rfbdev->helper.fbdev, state); 380 drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
381 state);
399} 382}
400 383
401int amdgpu_fbdev_total_size(struct amdgpu_device *adev) 384int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
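
The framebuffer changes are part of a tree-wide migration to the drm_fb_helper
wrappers: drm_fb_helper_alloc_fbi() replaces the open-coded framebuffer_alloc()
plus fb_alloc_cmap() plus helper->fbdev wiring (which is why those error paths
disappear), and teardown shrinks to two calls. A sketch of the pairing this
driver now relies on:

	/* Setup: one call allocates the fb_info, its cmap, and wires it to
	 * the helper; errors come back as an ERR_PTR, not NULL. */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);

	/* Teardown: the mirror image, replacing unregister_framebuffer(),
	 * fb_dealloc_cmap() and framebuffer_release(). */
	drm_fb_helper_unregister_fbi(helper);
	drm_fb_helper_release_fbi(helper);
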
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index a7189a1fa6a1..98500f1756f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -126,7 +126,8 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
126 (*fence)->ring = ring; 126 (*fence)->ring = ring;
127 (*fence)->owner = owner; 127 (*fence)->owner = owner;
128 fence_init(&(*fence)->base, &amdgpu_fence_ops, 128 fence_init(&(*fence)->base, &amdgpu_fence_ops,
129 &adev->fence_queue.lock, adev->fence_context + ring->idx, 129 &ring->fence_drv.fence_queue.lock,
130 adev->fence_context + ring->idx,
130 (*fence)->seq); 131 (*fence)->seq);
131 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, 132 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
132 (*fence)->seq, 133 (*fence)->seq,
@@ -136,38 +137,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
136} 137}
137 138
138/** 139/**
139 * amdgpu_fence_recreate - recreate a fence from an user fence
140 *
141 * @ring: ring the fence is associated with
142 * @owner: creator of the fence
143 * @seq: user fence sequence number
144 * @fence: resulting amdgpu fence object
145 *
146 * Recreates a fence command from the user fence sequence number (all asics).
147 * Returns 0 on success, -ENOMEM on failure.
148 */
149int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
150 uint64_t seq, struct amdgpu_fence **fence)
151{
152 struct amdgpu_device *adev = ring->adev;
153
154 if (seq > ring->fence_drv.sync_seq[ring->idx])
155 return -EINVAL;
156
157 *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
158 if ((*fence) == NULL)
159 return -ENOMEM;
160
161 (*fence)->seq = seq;
162 (*fence)->ring = ring;
163 (*fence)->owner = owner;
164 fence_init(&(*fence)->base, &amdgpu_fence_ops,
165 &adev->fence_queue.lock, adev->fence_context + ring->idx,
166 (*fence)->seq);
167 return 0;
168}
169
170/**
171 * amdgpu_fence_check_signaled - callback from fence_queue 140 * amdgpu_fence_check_signaled - callback from fence_queue
172 * 141 *
173 * this function is called with fence_queue lock held, which is also used 142 * this function is called with fence_queue lock held, which is also used
@@ -196,9 +165,7 @@ static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl
196 else 165 else
197 FENCE_TRACE(&fence->base, "was already signaled\n"); 166 FENCE_TRACE(&fence->base, "was already signaled\n");
198 167
199 amdgpu_irq_put(adev, fence->ring->fence_drv.irq_src, 168 __remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
200 fence->ring->fence_drv.irq_type);
201 __remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
202 fence_put(&fence->base); 169 fence_put(&fence->base);
203 } else 170 } else
204 FENCE_TRACE(&fence->base, "pending\n"); 171 FENCE_TRACE(&fence->base, "pending\n");
@@ -299,14 +266,9 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
299 return; 266 return;
300 } 267 }
301 268
302 if (fence_drv->delayed_irq && ring->adev->ddev->irq_enabled) { 269 if (amdgpu_fence_activity(ring)) {
303 fence_drv->delayed_irq = false; 270 wake_up_all(&ring->fence_drv.fence_queue);
304 amdgpu_irq_update(ring->adev, fence_drv->irq_src,
305 fence_drv->irq_type);
306 } 271 }
307
308 if (amdgpu_fence_activity(ring))
309 wake_up_all(&ring->adev->fence_queue);
310 else if (amdgpu_ring_is_lockup(ring)) { 272 else if (amdgpu_ring_is_lockup(ring)) {
311 /* good news we believe it's a lockup */ 273 /* good news we believe it's a lockup */
312 dev_warn(ring->adev->dev, "GPU lockup (current fence id " 274 dev_warn(ring->adev->dev, "GPU lockup (current fence id "
@@ -316,7 +278,7 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
316 278
317 /* remember that we need a reset */ 279
318 ring->adev->needs_reset = true; 280 ring->adev->needs_reset = true;
319 wake_up_all(&ring->adev->fence_queue); 281 wake_up_all(&ring->fence_drv.fence_queue);
320 } 282 }
321 up_read(&ring->adev->exclusive_lock); 283 up_read(&ring->adev->exclusive_lock);
322} 284}
@@ -332,62 +294,8 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
332 */ 294 */
333void amdgpu_fence_process(struct amdgpu_ring *ring) 295void amdgpu_fence_process(struct amdgpu_ring *ring)
334{ 296{
335 uint64_t seq, last_seq, last_emitted; 297 if (amdgpu_fence_activity(ring))
336 unsigned count_loop = 0; 298 wake_up_all(&ring->fence_drv.fence_queue);
337 bool wake = false;
338
339 /* Note there is a scenario here for an infinite loop but it's
340 * very unlikely to happen. For it to happen, the current polling
341 * process need to be interrupted by another process and another
342 * process needs to update the last_seq btw the atomic read and
343 * xchg of the current process.
344 *
345 * More over for this to go in infinite loop there need to be
346 * continuously new fence signaled ie amdgpu_fence_read needs
347 * to return a different value each time for both the currently
348 * polling process and the other process that xchg the last_seq
349 * btw atomic read and xchg of the current process. And the
350 * value the other process set as last seq must be higher than
351 * the seq value we just read. Which means that current process
352 * need to be interrupted after amdgpu_fence_read and before
353 * atomic xchg.
354 *
355 * To be even more safe we count the number of time we loop and
356 * we bail after 10 loop just accepting the fact that we might
357 * have temporarly set the last_seq not to the true real last
358 * seq but to an older one.
359 */
360 last_seq = atomic64_read(&ring->fence_drv.last_seq);
361 do {
362 last_emitted = ring->fence_drv.sync_seq[ring->idx];
363 seq = amdgpu_fence_read(ring);
364 seq |= last_seq & 0xffffffff00000000LL;
365 if (seq < last_seq) {
366 seq &= 0xffffffff;
367 seq |= last_emitted & 0xffffffff00000000LL;
368 }
369
370 if (seq <= last_seq || seq > last_emitted) {
371 break;
372 }
373 /* If we loop over we don't want to return without
374 * checking if a fence is signaled as it means that the
375 * seq we just read is different from the previous on.
376 */
377 wake = true;
378 last_seq = seq;
379 if ((count_loop++) > 10) {
380 /* We looped over too many time leave with the
381 * fact that we might have set an older fence
382 * seq then the current real last seq as signaled
383 * by the hw.
384 */
385 break;
386 }
387 } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
388
389 if (wake)
390 wake_up_all(&ring->adev->fence_queue);
391} 299}
392 300
393/** 301/**
@@ -447,284 +355,49 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
447{ 355{
448 struct amdgpu_fence *fence = to_amdgpu_fence(f); 356 struct amdgpu_fence *fence = to_amdgpu_fence(f);
449 struct amdgpu_ring *ring = fence->ring; 357 struct amdgpu_ring *ring = fence->ring;
450 struct amdgpu_device *adev = ring->adev;
451 358
452 if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) 359 if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
453 return false; 360 return false;
454 361
455 if (down_read_trylock(&adev->exclusive_lock)) {
456 amdgpu_irq_get(adev, ring->fence_drv.irq_src,
457 ring->fence_drv.irq_type);
458 if (amdgpu_fence_activity(ring))
459 wake_up_all_locked(&adev->fence_queue);
460
461 /* did fence get signaled after we enabled the sw irq? */
462 if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) {
463 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
464 ring->fence_drv.irq_type);
465 up_read(&adev->exclusive_lock);
466 return false;
467 }
468
469 up_read(&adev->exclusive_lock);
470 } else {
471 /* we're probably in a lockup, lets not fiddle too much */
472 if (amdgpu_irq_get_delayed(adev, ring->fence_drv.irq_src,
473 ring->fence_drv.irq_type))
474 ring->fence_drv.delayed_irq = true;
475 amdgpu_fence_schedule_check(ring);
476 }
477
478 fence->fence_wake.flags = 0; 362 fence->fence_wake.flags = 0;
479 fence->fence_wake.private = NULL; 363 fence->fence_wake.private = NULL;
480 fence->fence_wake.func = amdgpu_fence_check_signaled; 364 fence->fence_wake.func = amdgpu_fence_check_signaled;
481 __add_wait_queue(&adev->fence_queue, &fence->fence_wake); 365 __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
482 fence_get(f); 366 fence_get(f);
483 FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); 367 FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
484 return true; 368 return true;
485} 369}
486 370
487/** 371/*
488 * amdgpu_fence_signaled - check if a fence has signaled 372 * amdgpu_fence_ring_wait_seq - wait for seq of the specific ring to signal
489 * 373 * @ring: ring to wait on for the seq number
490 * @fence: amdgpu fence object 374 * @seq: seq number to wait for
491 *
492 * Check if the requested fence has signaled (all asics).
493 * Returns true if the fence has signaled or false if it has not.
494 */
495bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
496{
497 if (!fence)
498 return true;
499
500 if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) {
501 if (!fence_signal(&fence->base))
502 FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n");
503 return true;
504 }
505
506 return false;
507}
508
509/**
510 * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled
511 *
512 * @adev: amdgpu device pointer
513 * @seq: sequence numbers
514 *
515 * Check if the last signaled fence sequnce number is >= the requested
516 * sequence number (all asics).
517 * Returns true if any has signaled (current value is >= requested value)
518 * or false if it has not. Helper function for amdgpu_fence_wait_seq.
519 */
520static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
521{
522 unsigned i;
523
524 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
525 if (!adev->rings[i] || !seq[i])
526 continue;
527
528 if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i]))
529 return true;
530 }
531
532 return false;
533}
534
535/**
536 * amdgpu_fence_wait_seq_timeout - wait for a specific sequence numbers
537 *
538 * @adev: amdgpu device pointer
539 * @target_seq: sequence number(s) we want to wait for
540 * @intr: use interruptable sleep
541 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
542 *
543 * Wait for the requested sequence number(s) to be written by any ring
544 * (all asics). Sequnce number array is indexed by ring id.
545 * @intr selects whether to use interruptable (true) or non-interruptable
546 * (false) sleep when waiting for the sequence number. Helper function
547 * for amdgpu_fence_wait_*().
548 * Returns remaining time if the sequence number has passed, 0 when
549 * the wait timeout, or an error for all other cases.
550 * -EDEADLK is returned when a GPU lockup has been detected.
551 */
552static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
553 u64 *target_seq, bool intr,
554 long timeout)
555{
556 uint64_t last_seq[AMDGPU_MAX_RINGS];
557 bool signaled;
558 int i;
559 long r;
560
561 if (timeout == 0) {
562 return amdgpu_fence_any_seq_signaled(adev, target_seq);
563 }
564
565 while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {
566
567 /* Save current sequence values, used to check for GPU lockups */
568 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
569 struct amdgpu_ring *ring = adev->rings[i];
570
571 if (!ring || !target_seq[i])
572 continue;
573
574 last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
575 trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
576 amdgpu_irq_get(adev, ring->fence_drv.irq_src,
577 ring->fence_drv.irq_type);
578 }
579
580 if (intr) {
581 r = wait_event_interruptible_timeout(adev->fence_queue, (
582 (signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
583 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
584 } else {
585 r = wait_event_timeout(adev->fence_queue, (
586 (signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
587 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
588 }
589
590 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
591 struct amdgpu_ring *ring = adev->rings[i];
592
593 if (!ring || !target_seq[i])
594 continue;
595
596 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
597 ring->fence_drv.irq_type);
598 trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
599 }
600
601 if (unlikely(r < 0))
602 return r;
603
604 if (unlikely(!signaled)) {
605
606 if (adev->needs_reset)
607 return -EDEADLK;
608
609 /* we were interrupted for some reason and fence
610 * isn't signaled yet, resume waiting */
611 if (r)
612 continue;
613
614 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
615 struct amdgpu_ring *ring = adev->rings[i];
616
617 if (!ring || !target_seq[i])
618 continue;
619
620 if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq))
621 break;
622 }
623
624 if (i != AMDGPU_MAX_RINGS)
625 continue;
626
627 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
628 if (!adev->rings[i] || !target_seq[i])
629 continue;
630
631 if (amdgpu_ring_is_lockup(adev->rings[i]))
632 break;
633 }
634
635 if (i < AMDGPU_MAX_RINGS) {
636 /* good news we believe it's a lockup */
637 dev_warn(adev->dev, "GPU lockup (waiting for "
638 "0x%016llx last fence id 0x%016llx on"
639 " ring %d)\n",
640 target_seq[i], last_seq[i], i);
641
642 /* remember that we need an reset */
643 adev->needs_reset = true;
644 wake_up_all(&adev->fence_queue);
645 return -EDEADLK;
646 }
647
648 if (timeout < MAX_SCHEDULE_TIMEOUT) {
649 timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
650 if (timeout <= 0) {
651 return 0;
652 }
653 }
654 }
655 }
656 return timeout;
657}
658
659/**
660 * amdgpu_fence_wait - wait for a fence to signal
661 *
662 * @fence: amdgpu fence object
663 * @intr: use interruptable sleep
664 *
665 * Wait for the requested fence to signal (all asics).
666 * @intr selects whether to use interruptable (true) or non-interruptable
667 * (false) sleep when waiting for the fence.
668 * Returns 0 if the fence has passed, error for all other cases.
669 */
670int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
671{
672 uint64_t seq[AMDGPU_MAX_RINGS] = {};
673 long r;
674
675 seq[fence->ring->idx] = fence->seq;
676 r = amdgpu_fence_wait_seq_timeout(fence->ring->adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
677 if (r < 0) {
678 return r;
679 }
680
681 r = fence_signal(&fence->base);
682 if (!r)
683 FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
684 return 0;
685}
686
687/**
688 * amdgpu_fence_wait_any - wait for a fence to signal on any ring
689 *
690 * @adev: amdgpu device pointer
691 * @fences: amdgpu fence object(s)
692 * @intr: use interruptable sleep
693 * 375 *
694 * Wait for any requested fence to signal (all asics). Fence 376 * return value:
695 * array is indexed by ring id. @intr selects whether to use 377 * 0: seq signaled, and the GPU is not hung
696 * interruptable (true) or non-interruptable (false) sleep when 378 * -EDEADLK: GPU hang detected
697 * waiting for the fences. Used by the suballocator. 379 * -EINVAL: some parameter is not valid
698 * Returns 0 if any fence has passed, error for all other cases.
699 */ 380 */
700int amdgpu_fence_wait_any(struct amdgpu_device *adev, 381static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
701 struct amdgpu_fence **fences,
702 bool intr)
703{ 382{
704 uint64_t seq[AMDGPU_MAX_RINGS]; 383 struct amdgpu_device *adev = ring->adev;
705 unsigned i, num_rings = 0; 384 bool signaled = false;
706 long r;
707
708 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
709 seq[i] = 0;
710 385
711 if (!fences[i]) { 386 BUG_ON(!ring);
712 continue; 387 if (seq > ring->fence_drv.sync_seq[ring->idx])
713 } 388 return -EINVAL;
714 389
715 seq[i] = fences[i]->seq; 390 if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
716 ++num_rings; 391 return 0;
717 }
718 392
719 /* nothing to wait for ? */ 393 wait_event(ring->fence_drv.fence_queue, (
720 if (num_rings == 0) 394 (signaled = amdgpu_fence_seq_signaled(ring, seq))
721 return -ENOENT; 395 || adev->needs_reset));
722 396
723 r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT); 397 if (signaled)
724 if (r < 0) { 398 return 0;
725 return r; 399 else
726 } 400 return -EDEADLK;
727 return 0;
728} 401}
729 402
730/** 403/**
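
This is the heart of the fence rework: the device-global fence_queue, the
per-wait interrupt get/put dance, and the wraparound-tolerant polling loop all
give way to one wait queue per ring, with fence interrupts left enabled for a
ring's whole lifetime (see the amdgpu_irq_get() moved into
amdgpu_fence_driver_start_ring() below). Waiting then reduces to this sketch:

	/* Per-ring wait: sleep on the ring's own queue until the target
	 * sequence number signals or a GPU reset is flagged. */
	static int wait_seq_sketch(struct amdgpu_ring *ring, uint64_t seq)
	{
		bool signaled = false;

		if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
			return 0;
		wait_event(ring->fence_drv.fence_queue,
			   (signaled = amdgpu_fence_seq_signaled(ring, seq)) ||
			   ring->adev->needs_reset);
		return signaled ? 0 : -EDEADLK;
	}
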
@@ -739,19 +412,12 @@ int amdgpu_fence_wait_any(struct amdgpu_device *adev,
739 */ 412 */
740int amdgpu_fence_wait_next(struct amdgpu_ring *ring) 413int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
741{ 414{
742 uint64_t seq[AMDGPU_MAX_RINGS] = {}; 415 uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
743 long r;
744 416
745 seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL; 417 if (seq >= ring->fence_drv.sync_seq[ring->idx])
746 if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) {
747 /* nothing to wait for, last_seq is
748 already the last emited fence */
749 return -ENOENT; 418 return -ENOENT;
750 } 419
751 r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT); 420 return amdgpu_fence_ring_wait_seq(ring, seq);
752 if (r < 0)
753 return r;
754 return 0;
755} 421}
756 422
757/** 423/**
@@ -766,23 +432,12 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
766 */ 432 */
767int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) 433int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
768{ 434{
769 struct amdgpu_device *adev = ring->adev; 435 uint64_t seq = ring->fence_drv.sync_seq[ring->idx];
770 uint64_t seq[AMDGPU_MAX_RINGS] = {};
771 long r;
772 436
773 seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx]; 437 if (!seq)
774 if (!seq[ring->idx])
775 return 0; 438 return 0;
776 439
777 r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT); 440 return amdgpu_fence_ring_wait_seq(ring, seq);
778 if (r < 0) {
779 if (r == -EDEADLK)
780 return -EDEADLK;
781
782 dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
783 ring->idx, r);
784 }
785 return 0;
786} 441}
787 442
788/** 443/**
@@ -933,9 +588,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
933 ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index; 588 ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
934 } 589 }
935 amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq)); 590 amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
936 ring->fence_drv.initialized = true; 591 amdgpu_irq_get(adev, irq_src, irq_type);
592
937 ring->fence_drv.irq_src = irq_src; 593 ring->fence_drv.irq_src = irq_src;
938 ring->fence_drv.irq_type = irq_type; 594 ring->fence_drv.irq_type = irq_type;
595 ring->fence_drv.initialized = true;
596
939 dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, " 597 dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
940 "cpu addr 0x%p\n", ring->idx, 598 "cpu addr 0x%p\n", ring->idx,
941 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr); 599 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
@@ -966,6 +624,16 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
966 INIT_DELAYED_WORK(&ring->fence_drv.lockup_work, 624 INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
967 amdgpu_fence_check_lockup); 625 amdgpu_fence_check_lockup);
968 ring->fence_drv.ring = ring; 626 ring->fence_drv.ring = ring;
627
628 if (amdgpu_enable_scheduler) {
629 ring->scheduler = amd_sched_create((void *)ring->adev,
630 &amdgpu_sched_ops,
631 ring->idx, 5, 0,
632 amdgpu_sched_hw_submission);
633 if (!ring->scheduler)
634 DRM_ERROR("Failed to create scheduler on ring %d.\n",
635 ring->idx);
636 }
969} 637}
970 638
971/** 639/**
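
Note the create/destroy pairing across hunks: each ring optionally gets its own
scheduler instance here in amdgpu_fence_driver_init_ring(), and
amdgpu_fence_driver_fini() (above) tears it down with amd_sched_destroy() when
ring->scheduler is non-NULL. A creation failure is only logged here, not
propagated; condensed:

	/* Per-ring scheduler lifecycle introduced by this patch. */
	if (amdgpu_enable_scheduler)
		ring->scheduler = amd_sched_create((void *)ring->adev,
						   &amdgpu_sched_ops, ring->idx,
						   5, 0, amdgpu_sched_hw_submission);
	/* ... and on teardown ... */
	if (ring->scheduler)
		amd_sched_destroy(ring->scheduler);
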
@@ -982,7 +650,6 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
982 */ 650 */
983int amdgpu_fence_driver_init(struct amdgpu_device *adev) 651int amdgpu_fence_driver_init(struct amdgpu_device *adev)
984{ 652{
985 init_waitqueue_head(&adev->fence_queue);
986 if (amdgpu_debugfs_fence_init(adev)) 653 if (amdgpu_debugfs_fence_init(adev))
987 dev_err(adev->dev, "fence debugfs file creation failed\n"); 654 dev_err(adev->dev, "fence debugfs file creation failed\n");
988 655
@@ -1011,13 +678,78 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
1011 /* no need to trigger GPU reset as we are unloading */ 678 /* no need to trigger GPU reset as we are unloading */
1012 amdgpu_fence_driver_force_completion(adev); 679 amdgpu_fence_driver_force_completion(adev);
1013 } 680 }
1014 wake_up_all(&adev->fence_queue); 681 wake_up_all(&ring->fence_drv.fence_queue);
682 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
683 ring->fence_drv.irq_type);
684 if (ring->scheduler)
685 amd_sched_destroy(ring->scheduler);
1015 ring->fence_drv.initialized = false; 686 ring->fence_drv.initialized = false;
1016 } 687 }
1017 mutex_unlock(&adev->ring_lock); 688 mutex_unlock(&adev->ring_lock);
1018} 689}
1019 690
1020/** 691/**
692 * amdgpu_fence_driver_suspend - suspend the fence driver
693 * for all possible rings.
694 *
695 * @adev: amdgpu device pointer
696 *
697 * Suspend the fence driver for all possible rings (all asics).
698 */
699void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
700{
701 int i, r;
702
703 mutex_lock(&adev->ring_lock);
704 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
705 struct amdgpu_ring *ring = adev->rings[i];
706 if (!ring || !ring->fence_drv.initialized)
707 continue;
708
709 /* wait for gpu to finish processing current batch */
710 r = amdgpu_fence_wait_empty(ring);
711 if (r) {
712 /* delay GPU reset to resume */
713 amdgpu_fence_driver_force_completion(adev);
714 }
715
716 /* disable the interrupt */
717 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
718 ring->fence_drv.irq_type);
719 }
720 mutex_unlock(&adev->ring_lock);
721}
722
723/**
724 * amdgpu_fence_driver_resume - resume the fence driver
725 * for all possible rings.
726 *
727 * @adev: amdgpu device pointer
728 *
729 * Resume the fence driver for all possible rings (all asics).
730 * Not all asics have all rings, so each asic will only
731 * start the fence driver on the rings it has using
732 * amdgpu_fence_driver_start_ring().
733 * Returns 0 for success.
734 */
735void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
736{
737 int i;
738
739 mutex_lock(&adev->ring_lock);
740 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
741 struct amdgpu_ring *ring = adev->rings[i];
742 if (!ring || !ring->fence_drv.initialized)
743 continue;
744
745 /* enable the interrupt */
746 amdgpu_irq_get(adev, ring->fence_drv.irq_src,
747 ring->fence_drv.irq_type);
748 }
749 mutex_unlock(&adev->ring_lock);
750}
751
752/**
1021 * amdgpu_fence_driver_force_completion - force all fence waiter to complete 753 * amdgpu_fence_driver_force_completion - force all fence waiter to complete
1022 * 754 *
1023 * @adev: amdgpu device pointer 755 * @adev: amdgpu device pointer
@@ -1104,6 +836,22 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
1104 return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); 836 return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
1105} 837}
1106 838
839static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
840{
841 int idx;
842 struct amdgpu_fence *fence;
843
845 for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
846 fence = fences[idx];
847 if (fence) {
848 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
849 return true;
850 }
851 }
852 return false;
853}
854
1107struct amdgpu_wait_cb { 855struct amdgpu_wait_cb {
1108 struct fence_cb base; 856 struct fence_cb base;
1109 struct task_struct *task; 857 struct task_struct *task;
@@ -1119,14 +867,35 @@ static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
1119static signed long amdgpu_fence_default_wait(struct fence *f, bool intr, 867static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
1120 signed long t) 868 signed long t)
1121{ 869{
870 struct amdgpu_fence *array[AMDGPU_MAX_RINGS];
1122 struct amdgpu_fence *fence = to_amdgpu_fence(f); 871 struct amdgpu_fence *fence = to_amdgpu_fence(f);
1123 struct amdgpu_device *adev = fence->ring->adev; 872 struct amdgpu_device *adev = fence->ring->adev;
1124 struct amdgpu_wait_cb cb;
1125 873
1126 cb.task = current; 874 memset(&array[0], 0, sizeof(array));
875 array[0] = fence;
876
877 return amdgpu_fence_wait_any(adev, array, intr, t);
878}
879
880/* wait until any fence in array signaled */
881signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
882 struct amdgpu_fence **array, bool intr, signed long t)
883{
884 long idx = 0;
885 struct amdgpu_wait_cb cb[AMDGPU_MAX_RINGS];
886 struct amdgpu_fence *fence;
887
888 BUG_ON(!array);
1127 889
1128 if (fence_add_callback(f, &cb.base, amdgpu_fence_wait_cb)) 890 for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
1129 return t; 891 fence = array[idx];
892 if (fence) {
893 cb[idx].task = current;
894 if (fence_add_callback(&fence->base,
895 &cb[idx].base, amdgpu_fence_wait_cb))
896 return t; /* return if fence is already signaled */
897 }
898 }
1130 899
1131 while (t > 0) { 900 while (t > 0) {
1132 if (intr) 901 if (intr)
@@ -1135,10 +904,10 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
1135 set_current_state(TASK_UNINTERRUPTIBLE); 904 set_current_state(TASK_UNINTERRUPTIBLE);
1136 905
1137 /* 906 /*
1138 * amdgpu_test_signaled must be called after 907 * amdgpu_test_signaled_any must be called after
1139 * set_current_state to prevent a race with wake_up_process 908 * set_current_state to prevent a race with wake_up_process
1140 */ 909 */
1141 if (amdgpu_test_signaled(fence)) 910 if (amdgpu_test_signaled_any(array))
1142 break; 911 break;
1143 912
1144 if (adev->needs_reset) { 913 if (adev->needs_reset) {
@@ -1153,7 +922,13 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
1153 } 922 }
1154 923
1155 __set_current_state(TASK_RUNNING); 924 __set_current_state(TASK_RUNNING);
1156 fence_remove_callback(f, &cb.base); 925
927 for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
928 fence = array[idx];
929 if (fence)
930 fence_remove_callback(&fence->base, &cb[idx].base);
931 }
1157 932
1158 return t; 933 return t;
1159} 934}
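
The default wait path is now a thin wrapper over a new any-of wait: a callback
is armed on every non-NULL fence in a ring-indexed array, the task sleeps, and
after each wakeup amdgpu_test_signaled_any() is re-checked (deliberately after
set_current_state(), to avoid racing wake_up_process()). Waiting on a single
fence just builds a one-entry array, as amdgpu_fence_default_wait() shows; a
caller-side sketch (fence_a/fence_b are hypothetical fences on two rings):

	/* Wait until whichever of several fences signals first, or timeout. */
	struct amdgpu_fence *array[AMDGPU_MAX_RINGS] = {};

	array[ring_a->idx] = fence_a;
	array[ring_b->idx] = fence_b;
	t = amdgpu_fence_wait_any(adev, array, intr, t);
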
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index bc0fac618a3f..5104e64e9ad8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -88,6 +88,7 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
88 ib->fence = NULL; 88 ib->fence = NULL;
89 ib->user = NULL; 89 ib->user = NULL;
90 ib->vm = vm; 90 ib->vm = vm;
91 ib->ctx = NULL;
91 ib->gds_base = 0; 92 ib->gds_base = 0;
92 ib->gds_size = 0; 93 ib->gds_size = 0;
93 ib->gws_base = 0; 94 ib->gws_base = 0;
@@ -142,6 +143,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
142 struct amdgpu_ring *ring; 143 struct amdgpu_ring *ring;
143 struct amdgpu_ctx *ctx, *old_ctx; 144 struct amdgpu_ctx *ctx, *old_ctx;
144 struct amdgpu_vm *vm; 145 struct amdgpu_vm *vm;
146 uint64_t sequence;
145 unsigned i; 147 unsigned i;
146 int r = 0; 148 int r = 0;
147 149
@@ -165,9 +167,11 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
165 167
166 if (vm) { 168 if (vm) {
167 /* grab a vm id if necessary */ 169 /* grab a vm id if necessary */
168 struct amdgpu_fence *vm_id_fence = NULL; 170 r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
169 vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm); 171 if (r) {
170 amdgpu_sync_fence(&ibs->sync, vm_id_fence); 172 amdgpu_ring_unlock_undo(ring);
173 return r;
174 }
171 } 175 }
172 176
173 r = amdgpu_sync_rings(&ibs->sync, ring); 177 r = amdgpu_sync_rings(&ibs->sync, ring);
@@ -212,11 +216,18 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
212 return r; 216 return r;
213 } 217 }
214 218
219 sequence = amdgpu_enable_scheduler ? ib->sequence : 0;
220
221 if (!amdgpu_enable_scheduler && ib->ctx)
222 ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
223 &ib->fence->base,
224 sequence);
225
215 /* wrap the last IB with fence */ 226 /* wrap the last IB with fence */
216 if (ib->user) { 227 if (ib->user) {
217 uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo); 228 uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
218 addr += ib->user->offset; 229 addr += ib->user->offset;
219 amdgpu_ring_emit_fence(ring, addr, ib->fence->seq, 230 amdgpu_ring_emit_fence(ring, addr, ib->sequence,
220 AMDGPU_FENCE_FLAG_64BIT); 231 AMDGPU_FENCE_FLAG_64BIT);
221 } 232 }
222 233
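
The user-fence hunk above now emits ib->sequence instead of the raw hardware seqno, but the contract it implements is unchanged: the GPU stores a 64-bit sequence number at a user-visible address, and userspace treats submission N as complete once the value there reaches N. A toy single-process model of that contract (names are illustrative):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* the "user fence" word amdgpu_ring_emit_fence() writes via the GPU */
    static volatile uint64_t user_fence_word;

    /* GPU side: executed when the ring reaches the fence command */
    static void emit_fence(volatile uint64_t *addr, uint64_t seq)
    {
        *addr = seq;    /* AMDGPU_FENCE_FLAG_64BIT: one 64-bit store */
    }

    /* userspace side: has submission `seq` completed? */
    static bool fence_done(volatile const uint64_t *addr, uint64_t seq)
    {
        return *addr >= seq;    /* sequence numbers only move forward */
    }

    int main(void)
    {
        printf("seq 3 done? %d\n", fence_done(&user_fence_word, 3)); /* 0 */
        emit_fence(&user_fence_word, 3);
        printf("seq 3 done? %d\n", fence_done(&user_fence_word, 3)); /* 1 */
        return 0;
    }
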
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index fb44dd2231b1..90044b254404 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -206,6 +206,8 @@ restart_ih:
206 amdgpu_amdkfd_interrupt(adev, 206 amdgpu_amdkfd_interrupt(adev,
207 (const void *) &adev->irq.ih.ring[ring_index]); 207 (const void *) &adev->irq.ih.ring[ring_index]);
208 208
209 entry.iv_entry = (const uint32_t *)
210 &adev->irq.ih.ring[ring_index];
209 amdgpu_ih_decode_iv(adev, &entry); 211 amdgpu_ih_decode_iv(adev, &entry);
210 adev->irq.ih.rptr &= adev->irq.ih.ptr_mask; 212 adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
211 213
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index c62b09e555d6..ba38ae6a1463 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -52,6 +52,7 @@ struct amdgpu_iv_entry {
52 unsigned ring_id; 52 unsigned ring_id;
53 unsigned vm_id; 53 unsigned vm_id;
54 unsigned pas_id; 54 unsigned pas_id;
55 const uint32_t *iv_entry;
55}; 56};
56 57
57int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size, 58int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index b4d36f0f2153..0aba8e9bc8a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -272,6 +272,11 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
272 272
273 kfree(src->enabled_types); 273 kfree(src->enabled_types);
274 src->enabled_types = NULL; 274 src->enabled_types = NULL;
275 if (src->data) {
276 kfree(src->data);
277 kfree(src);
278 adev->irq.sources[i] = NULL;
279 }
275 } 280 }
276} 281}
277 282
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 8299795f2b2d..17b01aef4278 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -40,6 +40,7 @@ struct amdgpu_irq_src {
40 unsigned num_types; 40 unsigned num_types;
41 atomic_t *enabled_types; 41 atomic_t *enabled_types;
42 const struct amdgpu_irq_src_funcs *funcs; 42 const struct amdgpu_irq_src_funcs *funcs;
43 void *data;
43}; 44};
44 45
45/* provided by interrupt generating IP blocks */ 46/* provided by interrupt generating IP blocks */
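
The new void *data member gives IP blocks somewhere to hang per-source state, and the fini hunk above takes ownership of it: a source that carries data is assumed to be heap-allocated itself, so both allocations are freed and the slot cleared. A freestanding sketch of that ownership rule (structure reduced to the relevant field; helpers illustrative):

    #include <stdlib.h>

    struct irq_src {
        void *data;                     /* optional per-source state */
    };

    #define NUM_SRCS 4

    static struct irq_src *sources[NUM_SRCS];

    /* teardown mirroring the amdgpu_irq_fini() hunk: a source that owns
     * private data is itself dynamically allocated, so free both */
    static void irq_fini(void)
    {
        for (int i = 0; i < NUM_SRCS; ++i) {
            struct irq_src *src = sources[i];

            if (!src)
                continue;
            if (src->data) {
                free(src->data);
                free(src);
                sources[i] = NULL;
            }
        }
    }

    int main(void)
    {
        sources[0] = calloc(1, sizeof(*sources[0]));
        sources[0]->data = malloc(16);
        irq_fini();
        return 0;
    }
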
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 93000af92283..87da6b1848fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -96,8 +96,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
96 96
97 if ((amdgpu_runtime_pm != 0) && 97 if ((amdgpu_runtime_pm != 0) &&
98 amdgpu_has_atpx() && 98 amdgpu_has_atpx() &&
99 ((flags & AMDGPU_IS_APU) == 0)) 99 ((flags & AMD_IS_APU) == 0))
100 flags |= AMDGPU_IS_PX; 100 flags |= AMD_IS_PX;
101 101
102 /* amdgpu_device_init should report only fatal error 102 /* amdgpu_device_init should report only fatal error
103 * like memory allocation failure or iomapping failure, 103 * like memory allocation failure or iomapping failure,
@@ -451,11 +451,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
451 dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; 451 dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
452 dev_info._pad = 0; 452 dev_info._pad = 0;
453 dev_info.ids_flags = 0; 453 dev_info.ids_flags = 0;
454 if (adev->flags & AMDGPU_IS_APU) 454 if (adev->flags & AMD_IS_APU)
455 dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION; 455 dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
456 dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; 456 dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
457 dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; 457 dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
458 dev_info.virtual_address_alignment = max(PAGE_SIZE, 0x10000UL); 458 dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
459 dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) * 459 dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) *
460 AMDGPU_GPU_PAGE_SIZE; 460 AMDGPU_GPU_PAGE_SIZE;
461 dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; 461 dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
@@ -527,10 +527,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
527 mutex_init(&fpriv->bo_list_lock); 527 mutex_init(&fpriv->bo_list_lock);
528 idr_init(&fpriv->bo_list_handles); 528 idr_init(&fpriv->bo_list_handles);
529 529
530 /* init context manager */ 530 amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
531 mutex_init(&fpriv->ctx_mgr.lock);
532 idr_init(&fpriv->ctx_mgr.ctx_handles);
533 fpriv->ctx_mgr.adev = adev;
534 531
535 file_priv->driver_priv = fpriv; 532 file_priv->driver_priv = fpriv;
536 533
@@ -571,8 +568,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
571 idr_destroy(&fpriv->bo_list_handles); 568 idr_destroy(&fpriv->bo_list_handles);
572 mutex_destroy(&fpriv->bo_list_lock); 569 mutex_destroy(&fpriv->bo_list_lock);
573 570
574 /* release context */ 571 amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
575 amdgpu_ctx_fini(fpriv);
576 572
577 kfree(fpriv); 573 kfree(fpriv);
578 file_priv->driver_priv = NULL; 574 file_priv->driver_priv = NULL;
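
The open-coded context-manager setup (mutex, idr, adev back-pointer) moves behind amdgpu_ctx_mgr_init()/amdgpu_ctx_mgr_fini(). Judging only from the lines removed here, the helpers plausibly look like the sketch below; note the new call site no longer passes adev, so the back-pointer is presumably derived elsewhere or dropped, and the real fini must also release any contexts still registered in the idr:

    /* a plausible shape for the new helpers, inferred from the
     * open-coded lines they replace -- not the verbatim implementation */
    void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
    {
            mutex_init(&mgr->lock);
            idr_init(&mgr->ctx_handles);
    }

    void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
    {
            /* assumed: any surviving contexts are torn down first */
            idr_destroy(&mgr->ctx_handles);
            mutex_destroy(&mgr->lock);
    }
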
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8da64245b31b..57adcad2f7ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -223,18 +223,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
223 size_t acc_size; 223 size_t acc_size;
224 int r; 224 int r;
225 225
226 /* VI has a hw bug where VM PTEs have to be allocated in groups of 8.
227 * do this as a temporary workaround
228 */
229 if (!(domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
230 if (adev->asic_type >= CHIP_TOPAZ) {
231 if (byte_align & 0x7fff)
232 byte_align = ALIGN(byte_align, 0x8000);
233 if (size & 0x7fff)
234 size = ALIGN(size, 0x8000);
235 }
236 }
237
238 page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; 226 page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
239 size = ALIGN(size, PAGE_SIZE); 227 size = ALIGN(size, PAGE_SIZE);
240 228
@@ -462,7 +450,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
462int amdgpu_bo_evict_vram(struct amdgpu_device *adev) 450int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
463{ 451{
464	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */ 452	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
465 if (0 && (adev->flags & AMDGPU_IS_APU)) { 453 if (0 && (adev->flags & AMD_IS_APU)) {
466 /* Useless to evict on IGP chips */ 454 /* Useless to evict on IGP chips */
467 return 0; 455 return 0;
468 } 456 }
@@ -478,7 +466,6 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev)
478 } 466 }
479	dev_err(adev->dev, "Userspace still has active objects!\n"); 467	dev_err(adev->dev, "Userspace still has active objects!\n");
480 list_for_each_entry_safe(bo, n, &adev->gem.objects, list) { 468 list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
481 mutex_lock(&adev->ddev->struct_mutex);
482 dev_err(adev->dev, "%p %p %lu %lu force free\n", 469 dev_err(adev->dev, "%p %p %lu %lu force free\n",
483 &bo->gem_base, bo, (unsigned long)bo->gem_base.size, 470 &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
484 *((unsigned long *)&bo->gem_base.refcount)); 471 *((unsigned long *)&bo->gem_base.refcount));
@@ -486,8 +473,7 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev)
486 list_del_init(&bo->list); 473 list_del_init(&bo->list);
487 mutex_unlock(&bo->adev->gem.mutex); 474 mutex_unlock(&bo->adev->gem.mutex);
488 /* this should unref the ttm bo */ 475 /* this should unref the ttm bo */
489 drm_gem_object_unreference(&bo->gem_base); 476 drm_gem_object_unreference_unlocked(&bo->gem_base);
490 mutex_unlock(&adev->ddev->struct_mutex);
491 } 477 }
492} 478}
493 479
@@ -658,13 +644,13 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
658 * @shared: true if fence should be added shared 644 * @shared: true if fence should be added shared
659 * 645 *
660 */ 646 */
661void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence, 647void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
662 bool shared) 648 bool shared)
663{ 649{
664 struct reservation_object *resv = bo->tbo.resv; 650 struct reservation_object *resv = bo->tbo.resv;
665 651
666 if (shared) 652 if (shared)
667 reservation_object_add_shared_fence(resv, &fence->base); 653 reservation_object_add_shared_fence(resv, fence);
668 else 654 else
669 reservation_object_add_excl_fence(resv, &fence->base); 655 reservation_object_add_excl_fence(resv, fence);
670} 656}
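
amdgpu_bo_fence() now takes the generic struct fence directly, so callers no longer need an amdgpu_fence to publish completion on a buffer. The semantics are those of a reservation object: one exclusive (write) fence plus a list of shared (read) fences. A toy model of those two slots (the fixed-size shared list and all names are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    struct fence { int id; };

    #define MAX_SHARED 8

    /* toy reservation object: one writer slot, many reader slots */
    struct resv {
        struct fence *excl;
        struct fence *shared[MAX_SHARED];
        size_t shared_count;
    };

    static void resv_add_fence(struct resv *r, struct fence *f, int shared)
    {
        if (shared) {
            /* readers accumulate... */
            if (r->shared_count < MAX_SHARED)
                r->shared[r->shared_count++] = f;
        } else {
            /* ...a writer replaces the exclusive slot and, as with
             * reservation_object_add_excl_fence(), supersedes readers */
            r->excl = f;
            r->shared_count = 0;
        }
    }

    int main(void)
    {
        struct resv r = {0};
        struct fence read1 = {1}, read2 = {2}, write = {3};

        resv_add_fence(&r, &read1, 1);
        resv_add_fence(&r, &read2, 1);
        resv_add_fence(&r, &write, 0);
        printf("excl=%d shared=%zu\n", r.excl->id, r.shared_count);
        return 0;
    }
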
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 675bdc30e41d..238465a9ac55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -161,7 +161,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
161void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, 161void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
162 struct ttm_mem_reg *new_mem); 162 struct ttm_mem_reg *new_mem);
163int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); 163int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
164void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence, 164void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
165 bool shared); 165 bool shared);
166 166
167/* 167/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index ed13baa7c976..efed11509f4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -82,7 +82,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
82 mutex_unlock(&adev->pm.mutex); 82 mutex_unlock(&adev->pm.mutex);
83 83
84 /* Can't set dpm state when the card is off */ 84 /* Can't set dpm state when the card is off */
85 if (!(adev->flags & AMDGPU_IS_PX) || 85 if (!(adev->flags & AMD_IS_PX) ||
86 (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) 86 (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
87 amdgpu_pm_compute_clocks(adev); 87 amdgpu_pm_compute_clocks(adev);
88fail: 88fail:
@@ -538,7 +538,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
538 /* vce just modifies an existing state so force a change */ 538 /* vce just modifies an existing state so force a change */
539 if (ps->vce_active != adev->pm.dpm.vce_active) 539 if (ps->vce_active != adev->pm.dpm.vce_active)
540 goto force; 540 goto force;
541 if (adev->flags & AMDGPU_IS_APU) { 541 if (adev->flags & AMD_IS_APU) {
542 /* for APUs if the num crtcs changed but state is the same, 542 /* for APUs if the num crtcs changed but state is the same,
543 * all we need to do is update the display configuration. 543 * all we need to do is update the display configuration.
544 */ 544 */
@@ -580,7 +580,6 @@ force:
580 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); 580 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
581 } 581 }
582 582
583 mutex_lock(&adev->ddev->struct_mutex);
584 mutex_lock(&adev->ring_lock); 583 mutex_lock(&adev->ring_lock);
585 584
586 /* update whether vce is active */ 585 /* update whether vce is active */
@@ -628,7 +627,6 @@ force:
628 627
629done: 628done:
630 mutex_unlock(&adev->ring_lock); 629 mutex_unlock(&adev->ring_lock);
631 mutex_unlock(&adev->ddev->struct_mutex);
632} 630}
633 631
634void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 632void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 855e2196657a..7d442c51063e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -342,6 +342,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
342 amdgpu_fence_driver_init_ring(ring); 342 amdgpu_fence_driver_init_ring(ring);
343 } 343 }
344 344
345 init_waitqueue_head(&ring->fence_drv.fence_queue);
346
345 r = amdgpu_wb_get(adev, &ring->rptr_offs); 347 r = amdgpu_wb_get(adev, &ring->rptr_offs);
346 if (r) { 348 if (r) {
347 dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); 349 dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
@@ -367,7 +369,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
367 } 369 }
368 ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4); 370 ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
369 ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs]; 371 ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
370 372 spin_lock_init(&ring->fence_lock);
371 r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type); 373 r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
372 if (r) { 374 if (r) {
373 dev_err(adev->dev, "failed initializing fences (%d).\n", r); 375 dev_err(adev->dev, "failed initializing fences (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index eb20987ce18d..d6398cf45f24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -160,7 +160,8 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
160 160
161 sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist); 161 sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
162 list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) { 162 list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
163 if (sa_bo->fence == NULL || !amdgpu_fence_signaled(sa_bo->fence)) { 163 if (sa_bo->fence == NULL ||
164 !fence_is_signaled(&sa_bo->fence->base)) {
164 return; 165 return;
165 } 166 }
166 amdgpu_sa_bo_remove_locked(sa_bo); 167 amdgpu_sa_bo_remove_locked(sa_bo);
@@ -274,7 +275,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
274 sa_bo = list_first_entry(&sa_manager->flist[i], 275 sa_bo = list_first_entry(&sa_manager->flist[i],
275 struct amdgpu_sa_bo, flist); 276 struct amdgpu_sa_bo, flist);
276 277
277 if (!amdgpu_fence_signaled(sa_bo->fence)) { 278 if (!fence_is_signaled(&sa_bo->fence->base)) {
278 fences[i] = sa_bo->fence; 279 fences[i] = sa_bo->fence;
279 continue; 280 continue;
280 } 281 }
@@ -317,6 +318,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
317 struct amdgpu_fence *fences[AMDGPU_MAX_RINGS]; 318 struct amdgpu_fence *fences[AMDGPU_MAX_RINGS];
318 unsigned tries[AMDGPU_MAX_RINGS]; 319 unsigned tries[AMDGPU_MAX_RINGS];
319 int i, r; 320 int i, r;
321 signed long t;
320 322
321 BUG_ON(align > sa_manager->align); 323 BUG_ON(align > sa_manager->align);
322 BUG_ON(size > sa_manager->size); 324 BUG_ON(size > sa_manager->size);
@@ -350,7 +352,8 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
350 } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries)); 352 } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
351 353
352 spin_unlock(&sa_manager->wq.lock); 354 spin_unlock(&sa_manager->wq.lock);
353 r = amdgpu_fence_wait_any(adev, fences, false); 355 t = amdgpu_fence_wait_any(adev, fences, false, MAX_SCHEDULE_TIMEOUT);
356 r = (t > 0) ? 0 : t;
354 spin_lock(&sa_manager->wq.lock); 357 spin_lock(&sa_manager->wq.lock);
355 /* if we have nothing to wait for block */ 358 /* if we have nothing to wait for block */
356 if (r == -ENOENT) { 359 if (r == -ENOENT) {
@@ -379,7 +382,7 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
379 382
380 sa_manager = (*sa_bo)->manager; 383 sa_manager = (*sa_bo)->manager;
381 spin_lock(&sa_manager->wq.lock); 384 spin_lock(&sa_manager->wq.lock);
382 if (fence && !amdgpu_fence_signaled(fence)) { 385 if (fence && !fence_is_signaled(&fence->base)) {
383 (*sa_bo)->fence = amdgpu_fence_ref(fence); 386 (*sa_bo)->fence = amdgpu_fence_ref(fence);
384 list_add_tail(&(*sa_bo)->flist, 387 list_add_tail(&(*sa_bo)->flist,
385 &sa_manager->flist[fence->ring->idx]); 388 &sa_manager->flist[fence->ring->idx]);
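
amdgpu_fence_wait_any() follows the fence-wait-with-timeout convention: negative means error, zero means the timeout elapsed, positive is the time remaining on success. The call site above folds that back into the old 0/-errno contract; since it passes MAX_SCHEDULE_TIMEOUT, the zero case is unreachable in practice. The mapping as a standalone helper (illustrative):

    #include <stdio.h>

    /* map a fence-wait-style result onto 0 / -errno:
     *   t > 0  -> success, t jiffies were left
     *   t == 0 -> timed out (folds into 0 here; unreachable with
     *             MAX_SCHEDULE_TIMEOUT)
     *   t < 0  -> -errno from the wait itself
     */
    static int wait_result_to_errno(long t)
    {
        return (t > 0) ? 0 : (int)t;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               wait_result_to_errno(100),   /* 0: signaled in time */
               wait_result_to_errno(0),     /* 0: timeout case */
               wait_result_to_errno(-512)); /* e.g. -ERESTARTSYS */
        return 0;
    }
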
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
new file mode 100644
index 000000000000..a86e38158afa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -0,0 +1,145 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 *
23 */
24#include <linux/kthread.h>
25#include <linux/wait.h>
26#include <linux/sched.h>
27#include <drm/drmP.h>
28#include "amdgpu.h"
29
30static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
31 struct amd_sched_entity *entity,
32 struct amd_sched_job *job)
33{
34 int r = 0;
35 struct amdgpu_cs_parser *sched_job;
36 if (!job || !job->data) {
37 DRM_ERROR("job is null\n");
38 return -EINVAL;
39 }
40
41 sched_job = (struct amdgpu_cs_parser *)job->data;
42 if (sched_job->prepare_job) {
43 r = sched_job->prepare_job(sched_job);
44 if (r) {
45 DRM_ERROR("Prepare job error\n");
46 schedule_work(&sched_job->job_work);
47 }
48 }
49 return r;
50}
51
52static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
53 struct amd_sched_entity *entity,
54 struct amd_sched_job *job)
55{
56 int r = 0;
57 struct amdgpu_cs_parser *sched_job;
58 struct amdgpu_fence *fence;
59
60 if (!job || !job->data) {
61 DRM_ERROR("job is null\n");
62 return NULL;
63 }
64 sched_job = (struct amdgpu_cs_parser *)job->data;
65 mutex_lock(&sched_job->job_lock);
66 r = amdgpu_ib_schedule(sched_job->adev,
67 sched_job->num_ibs,
68 sched_job->ibs,
69 sched_job->filp);
70 if (r)
71 goto err;
72 fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
73
74 if (sched_job->run_job) {
75 r = sched_job->run_job(sched_job);
76 if (r)
77 goto err;
78 }
79
80 mutex_unlock(&sched_job->job_lock);
81 return &fence->base;
82
83err:
84 DRM_ERROR("Run job error\n");
85 mutex_unlock(&sched_job->job_lock);
86 schedule_work(&sched_job->job_work);
87 return NULL;
88}
89
90static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched,
91 struct amd_sched_job *job)
92{
93 struct amdgpu_cs_parser *sched_job;
94
95 if (!job || !job->data) {
96 DRM_ERROR("job is null\n");
97 return;
98 }
99 sched_job = (struct amdgpu_cs_parser *)job->data;
100 schedule_work(&sched_job->job_work);
101}
102
103struct amd_sched_backend_ops amdgpu_sched_ops = {
104 .prepare_job = amdgpu_sched_prepare_job,
105 .run_job = amdgpu_sched_run_job,
106 .process_job = amdgpu_sched_process_job
107};
108
109int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
110 struct amdgpu_ring *ring,
111 struct amdgpu_ib *ibs,
112 unsigned num_ibs,
113 int (*free_job)(struct amdgpu_cs_parser *),
114 void *owner,
115 struct fence **f)
116{
117 int r = 0;
118 if (amdgpu_enable_scheduler) {
119 struct amdgpu_cs_parser *sched_job =
120 amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
121 ibs, num_ibs);
 122		if (!sched_job) {
123 return -ENOMEM;
124 }
125 sched_job->free_job = free_job;
126 mutex_lock(&sched_job->job_lock);
127 r = amd_sched_push_job(ring->scheduler,
128 &adev->kernel_ctx.rings[ring->idx].entity,
129 sched_job, &sched_job->s_fence);
130 if (r) {
131 mutex_unlock(&sched_job->job_lock);
132 kfree(sched_job);
133 return r;
134 }
135 ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq;
136 *f = fence_get(&sched_job->s_fence->base);
137 mutex_unlock(&sched_job->job_lock);
138 } else {
139 r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
140 if (r)
141 return r;
142 *f = fence_get(&ibs[num_ibs - 1].fence->base);
143 }
144 return 0;
145}
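
The new file plugs amdgpu into the shared GPU scheduler through a three-hook vtable: prepare_job runs before the job is picked, run_job submits it and hands back the fence to wait on, and process_job handles completion. A minimal userspace model of such an ops-driven scheduler core (all names illustrative, not the amd_sched API):

    #include <stdio.h>

    struct job;

    struct sched_ops {
        int  (*prepare)(struct job *j);
        int  (*run)(struct job *j);      /* submits, "emits a fence" */
        void (*process)(struct job *j);  /* completion / cleanup */
    };

    struct job {
        const char *name;
        const struct sched_ops *ops;
    };

    /* the scheduler core only ever talks to jobs through the vtable */
    static void sched_one(struct job *j)
    {
        if (j->ops->prepare && j->ops->prepare(j))
            return;             /* prepare failed: job is dropped */
        if (j->ops->run(j))
            return;             /* run failed */
        j->ops->process(j);     /* e.g. schedule_work() in the driver */
    }

    static int my_prepare(struct job *j) { printf("prepare %s\n", j->name); return 0; }
    static int my_run(struct job *j)     { printf("run %s\n", j->name); return 0; }
    static void my_process(struct job *j){ printf("process %s\n", j->name); }

    static const struct sched_ops my_ops = {
        .prepare = my_prepare, .run = my_run, .process = my_process,
    };

    int main(void)
    {
        struct job j = { .name = "ib-job", .ops = &my_ops };
        sched_one(&j);
        return 0;
    }
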
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 21accbdd0a1a..7cb711fc1ee2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -53,20 +53,24 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
53} 53}
54 54
55/** 55/**
56 * amdgpu_sync_fence - use the semaphore to sync to a fence 56 * amdgpu_sync_fence - remember to sync to this fence
57 * 57 *
58 * @sync: sync object to add fence to 58 * @sync: sync object to add fence to
59 * @fence: fence to sync to 59 * @fence: fence to sync to
60 * 60 *
61 * Sync to the fence using the semaphore objects
62 */ 61 */
63void amdgpu_sync_fence(struct amdgpu_sync *sync, 62int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
64 struct amdgpu_fence *fence) 63 struct fence *f)
65{ 64{
65 struct amdgpu_fence *fence;
66 struct amdgpu_fence *other; 66 struct amdgpu_fence *other;
67 67
68 if (!fence) 68 if (!f)
69 return; 69 return 0;
70
71 fence = to_amdgpu_fence(f);
72 if (!fence || fence->ring->adev != adev)
73 return fence_wait(f, true);
70 74
71 other = sync->sync_to[fence->ring->idx]; 75 other = sync->sync_to[fence->ring->idx];
72 sync->sync_to[fence->ring->idx] = amdgpu_fence_ref( 76 sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
@@ -79,6 +83,8 @@ void amdgpu_sync_fence(struct amdgpu_sync *sync,
79 amdgpu_fence_later(fence, other)); 83 amdgpu_fence_later(fence, other));
80 amdgpu_fence_unref(&other); 84 amdgpu_fence_unref(&other);
81 } 85 }
86
87 return 0;
82} 88}
83 89
84/** 90/**
@@ -106,11 +112,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
106 112
107 /* always sync to the exclusive fence */ 113 /* always sync to the exclusive fence */
108 f = reservation_object_get_excl(resv); 114 f = reservation_object_get_excl(resv);
109 fence = f ? to_amdgpu_fence(f) : NULL; 115 r = amdgpu_sync_fence(adev, sync, f);
110 if (fence && fence->ring->adev == adev)
111 amdgpu_sync_fence(sync, fence);
112 else if (f)
113 r = fence_wait(f, true);
114 116
115 flist = reservation_object_get_list(resv); 117 flist = reservation_object_get_list(resv);
116 if (!flist || r) 118 if (!flist || r)
@@ -121,14 +123,26 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
121 reservation_object_held(resv)); 123 reservation_object_held(resv));
122 fence = f ? to_amdgpu_fence(f) : NULL; 124 fence = f ? to_amdgpu_fence(f) : NULL;
123 if (fence && fence->ring->adev == adev) { 125 if (fence && fence->ring->adev == adev) {
124 if (fence->owner != owner || 126 /* VM updates are only interesting
125 fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED) 127 * for other VM updates and moves.
126 amdgpu_sync_fence(sync, fence); 128 */
127 } else if (f) { 129 if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
128 r = fence_wait(f, true); 130 (fence->owner != AMDGPU_FENCE_OWNER_MOVE) &&
129 if (r) 131 ((owner == AMDGPU_FENCE_OWNER_VM) !=
130 break; 132 (fence->owner == AMDGPU_FENCE_OWNER_VM)))
133 continue;
134
 135				/* Ignore fences from the same owner,
 136				 * unless the owner is undefined.
 137				 */
138 if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
139 fence->owner == owner)
140 continue;
131 } 141 }
142
143 r = amdgpu_sync_fence(adev, sync, f);
144 if (r)
145 break;
132 } 146 }
133 return r; 147 return r;
134} 148}
@@ -164,9 +178,9 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
164 return -EINVAL; 178 return -EINVAL;
165 } 179 }
166 180
167 if (count >= AMDGPU_NUM_SYNCS) { 181 if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
168 /* not enough room, wait manually */ 182 /* not enough room, wait manually */
169 r = amdgpu_fence_wait(fence, false); 183 r = fence_wait(&fence->base, false);
170 if (r) 184 if (r)
171 return r; 185 return r;
172 continue; 186 continue;
@@ -186,7 +200,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
186 if (!amdgpu_semaphore_emit_signal(other, semaphore)) { 200 if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
187 /* signaling wasn't successful wait manually */ 201 /* signaling wasn't successful wait manually */
188 amdgpu_ring_undo(other); 202 amdgpu_ring_undo(other);
189 r = amdgpu_fence_wait(fence, false); 203 r = fence_wait(&fence->base, false);
190 if (r) 204 if (r)
191 return r; 205 return r;
192 continue; 206 continue;
@@ -196,7 +210,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
196 if (!amdgpu_semaphore_emit_wait(ring, semaphore)) { 210 if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
197 /* waiting wasn't successful wait manually */ 211 /* waiting wasn't successful wait manually */
198 amdgpu_ring_undo(other); 212 amdgpu_ring_undo(other);
199 r = amdgpu_fence_wait(fence, false); 213 r = fence_wait(&fence->base, false);
200 if (r) 214 if (r)
201 return r; 215 return r;
202 continue; 216 continue;
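
The reworked amdgpu_sync_resv() filter reads as a pure predicate over (owner, fence->owner): move fences are always synced against, VM-update fences only matter to other VM updates and moves, and same-owner fences are skipped unless the owner is undefined. The same rules as a standalone function with a few spot checks (the enum is an illustrative stand-in for the AMDGPU_FENCE_OWNER_* constants):

    #include <stdbool.h>
    #include <stdio.h>

    enum owner { OWNER_UNDEFINED, OWNER_VM, OWNER_MOVE, OWNER_CTX_A };

    /* mirror of the skip logic in the amdgpu_sync_resv() hunk above */
    static bool needs_sync(enum owner owner, enum owner fence_owner)
    {
        /* VM updates only matter to other VM updates and moves */
        if (owner != OWNER_MOVE && fence_owner != OWNER_MOVE &&
            (owner == OWNER_VM) != (fence_owner == OWNER_VM))
            return false;

        /* same owner is ignored, unless the owner is undefined */
        if (owner != OWNER_UNDEFINED && fence_owner == owner)
            return false;

        return true;
    }

    int main(void)
    {
        printf("%d\n", needs_sync(OWNER_VM, OWNER_CTX_A));    /* 0 */
        printf("%d\n", needs_sync(OWNER_VM, OWNER_MOVE));     /* 1 */
        printf("%d\n", needs_sync(OWNER_CTX_A, OWNER_CTX_A)); /* 0 */
        printf("%d\n", needs_sync(OWNER_UNDEFINED, OWNER_UNDEFINED)); /* 1 */
        return 0;
    }
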
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index df202999fbfe..962dd5552137 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -116,7 +116,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
116 goto out_lclean_unpin; 116 goto out_lclean_unpin;
117 } 117 }
118 118
119 r = amdgpu_fence_wait(fence, false); 119 r = fence_wait(&fence->base, false);
120 if (r) { 120 if (r) {
121 DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); 121 DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
122 goto out_lclean_unpin; 122 goto out_lclean_unpin;
@@ -161,7 +161,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
161 goto out_lclean_unpin; 161 goto out_lclean_unpin;
162 } 162 }
163 163
164 r = amdgpu_fence_wait(fence, false); 164 r = fence_wait(&fence->base, false);
165 if (r) { 165 if (r) {
166 DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i); 166 DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
167 goto out_lclean_unpin; 167 goto out_lclean_unpin;
@@ -238,7 +238,7 @@ void amdgpu_test_moves(struct amdgpu_device *adev)
238 238
239static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev, 239static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
240 struct amdgpu_ring *ring, 240 struct amdgpu_ring *ring,
241 struct amdgpu_fence **fence) 241 struct fence **fence)
242{ 242{
243 uint32_t handle = ring->idx ^ 0xdeafbeef; 243 uint32_t handle = ring->idx ^ 0xdeafbeef;
244 int r; 244 int r;
@@ -269,15 +269,16 @@ static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
269 DRM_ERROR("Failed to get dummy destroy msg\n"); 269 DRM_ERROR("Failed to get dummy destroy msg\n");
270 return r; 270 return r;
271 } 271 }
272
273 } else { 272 } else {
273 struct amdgpu_fence *a_fence = NULL;
274 r = amdgpu_ring_lock(ring, 64); 274 r = amdgpu_ring_lock(ring, 64);
275 if (r) { 275 if (r) {
276 DRM_ERROR("Failed to lock ring A %d\n", ring->idx); 276 DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
277 return r; 277 return r;
278 } 278 }
279 amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence); 279 amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &a_fence);
280 amdgpu_ring_unlock_commit(ring); 280 amdgpu_ring_unlock_commit(ring);
281 *fence = &a_fence->base;
281 } 282 }
282 return 0; 283 return 0;
283} 284}
@@ -286,7 +287,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev,
286 struct amdgpu_ring *ringA, 287 struct amdgpu_ring *ringA,
287 struct amdgpu_ring *ringB) 288 struct amdgpu_ring *ringB)
288{ 289{
289 struct amdgpu_fence *fence1 = NULL, *fence2 = NULL; 290 struct fence *fence1 = NULL, *fence2 = NULL;
290 struct amdgpu_semaphore *semaphore = NULL; 291 struct amdgpu_semaphore *semaphore = NULL;
291 int r; 292 int r;
292 293
@@ -322,7 +323,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev,
322 323
323 mdelay(1000); 324 mdelay(1000);
324 325
325 if (amdgpu_fence_signaled(fence1)) { 326 if (fence_is_signaled(fence1)) {
326 DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n"); 327 DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
327 goto out_cleanup; 328 goto out_cleanup;
328 } 329 }
@@ -335,7 +336,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev,
335 amdgpu_semaphore_emit_signal(ringB, semaphore); 336 amdgpu_semaphore_emit_signal(ringB, semaphore);
336 amdgpu_ring_unlock_commit(ringB); 337 amdgpu_ring_unlock_commit(ringB);
337 338
338 r = amdgpu_fence_wait(fence1, false); 339 r = fence_wait(fence1, false);
339 if (r) { 340 if (r) {
340 DRM_ERROR("Failed to wait for sync fence 1\n"); 341 DRM_ERROR("Failed to wait for sync fence 1\n");
341 goto out_cleanup; 342 goto out_cleanup;
@@ -343,7 +344,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev,
343 344
344 mdelay(1000); 345 mdelay(1000);
345 346
346 if (amdgpu_fence_signaled(fence2)) { 347 if (fence_is_signaled(fence2)) {
347 DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n"); 348 DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
348 goto out_cleanup; 349 goto out_cleanup;
349 } 350 }
@@ -356,7 +357,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev,
356 amdgpu_semaphore_emit_signal(ringB, semaphore); 357 amdgpu_semaphore_emit_signal(ringB, semaphore);
357 amdgpu_ring_unlock_commit(ringB); 358 amdgpu_ring_unlock_commit(ringB);
358 359
359 r = amdgpu_fence_wait(fence2, false); 360 r = fence_wait(fence2, false);
360 if (r) { 361 if (r) {
 361		DRM_ERROR("Failed to wait for sync fence 2\n"); 362		DRM_ERROR("Failed to wait for sync fence 2\n");
362 goto out_cleanup; 363 goto out_cleanup;
@@ -366,10 +367,10 @@ out_cleanup:
366 amdgpu_semaphore_free(adev, &semaphore, NULL); 367 amdgpu_semaphore_free(adev, &semaphore, NULL);
367 368
368 if (fence1) 369 if (fence1)
369 amdgpu_fence_unref(&fence1); 370 fence_put(fence1);
370 371
371 if (fence2) 372 if (fence2)
372 amdgpu_fence_unref(&fence2); 373 fence_put(fence2);
373 374
374 if (r) 375 if (r)
375 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); 376 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
@@ -380,7 +381,7 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
380 struct amdgpu_ring *ringB, 381 struct amdgpu_ring *ringB,
381 struct amdgpu_ring *ringC) 382 struct amdgpu_ring *ringC)
382{ 383{
383 struct amdgpu_fence *fenceA = NULL, *fenceB = NULL; 384 struct fence *fenceA = NULL, *fenceB = NULL;
384 struct amdgpu_semaphore *semaphore = NULL; 385 struct amdgpu_semaphore *semaphore = NULL;
385 bool sigA, sigB; 386 bool sigA, sigB;
386 int i, r; 387 int i, r;
@@ -416,11 +417,11 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
416 417
417 mdelay(1000); 418 mdelay(1000);
418 419
419 if (amdgpu_fence_signaled(fenceA)) { 420 if (fence_is_signaled(fenceA)) {
420 DRM_ERROR("Fence A signaled without waiting for semaphore.\n"); 421 DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
421 goto out_cleanup; 422 goto out_cleanup;
422 } 423 }
423 if (amdgpu_fence_signaled(fenceB)) { 424 if (fence_is_signaled(fenceB)) {
424 DRM_ERROR("Fence B signaled without waiting for semaphore.\n"); 425 DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
425 goto out_cleanup; 426 goto out_cleanup;
426 } 427 }
@@ -435,8 +436,8 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
435 436
436 for (i = 0; i < 30; ++i) { 437 for (i = 0; i < 30; ++i) {
437 mdelay(100); 438 mdelay(100);
438 sigA = amdgpu_fence_signaled(fenceA); 439 sigA = fence_is_signaled(fenceA);
439 sigB = amdgpu_fence_signaled(fenceB); 440 sigB = fence_is_signaled(fenceB);
440 if (sigA || sigB) 441 if (sigA || sigB)
441 break; 442 break;
442 } 443 }
@@ -461,12 +462,12 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
461 462
462 mdelay(1000); 463 mdelay(1000);
463 464
464 r = amdgpu_fence_wait(fenceA, false); 465 r = fence_wait(fenceA, false);
465 if (r) { 466 if (r) {
466 DRM_ERROR("Failed to wait for sync fence A\n"); 467 DRM_ERROR("Failed to wait for sync fence A\n");
467 goto out_cleanup; 468 goto out_cleanup;
468 } 469 }
469 r = amdgpu_fence_wait(fenceB, false); 470 r = fence_wait(fenceB, false);
470 if (r) { 471 if (r) {
471 DRM_ERROR("Failed to wait for sync fence B\n"); 472 DRM_ERROR("Failed to wait for sync fence B\n");
472 goto out_cleanup; 473 goto out_cleanup;
@@ -476,10 +477,10 @@ out_cleanup:
476 amdgpu_semaphore_free(adev, &semaphore, NULL); 477 amdgpu_semaphore_free(adev, &semaphore, NULL);
477 478
478 if (fenceA) 479 if (fenceA)
479 amdgpu_fence_unref(&fenceA); 480 fence_put(fenceA);
480 481
481 if (fenceB) 482 if (fenceB)
482 amdgpu_fence_unref(&fenceB); 483 fence_put(fenceB);
483 484
484 if (r) 485 if (r)
485 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); 486 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2f7a5efa21c2..68369cf1e318 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -52,6 +52,7 @@
52#endif 52#endif
53#define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin" 53#define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
54#define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin" 54#define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
55#define FIRMWARE_FIJI "amdgpu/fiji_uvd.bin"
55 56
56/** 57/**
57 * amdgpu_uvd_cs_ctx - Command submission parser context 58 * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -81,6 +82,7 @@ MODULE_FIRMWARE(FIRMWARE_MULLINS);
81#endif 82#endif
82MODULE_FIRMWARE(FIRMWARE_TONGA); 83MODULE_FIRMWARE(FIRMWARE_TONGA);
83MODULE_FIRMWARE(FIRMWARE_CARRIZO); 84MODULE_FIRMWARE(FIRMWARE_CARRIZO);
85MODULE_FIRMWARE(FIRMWARE_FIJI);
84 86
85static void amdgpu_uvd_note_usage(struct amdgpu_device *adev); 87static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
86static void amdgpu_uvd_idle_work_handler(struct work_struct *work); 88static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@@ -116,6 +118,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
116 case CHIP_TONGA: 118 case CHIP_TONGA:
117 fw_name = FIRMWARE_TONGA; 119 fw_name = FIRMWARE_TONGA;
118 break; 120 break;
121 case CHIP_FIJI:
122 fw_name = FIRMWARE_FIJI;
123 break;
119 case CHIP_CARRIZO: 124 case CHIP_CARRIZO:
120 fw_name = FIRMWARE_CARRIZO; 125 fw_name = FIRMWARE_CARRIZO;
121 break; 126 break;
@@ -283,7 +288,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
283 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { 288 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
284 uint32_t handle = atomic_read(&adev->uvd.handles[i]); 289 uint32_t handle = atomic_read(&adev->uvd.handles[i]);
285 if (handle != 0 && adev->uvd.filp[i] == filp) { 290 if (handle != 0 && adev->uvd.filp[i] == filp) {
286 struct amdgpu_fence *fence; 291 struct fence *fence;
287 292
288 amdgpu_uvd_note_usage(adev); 293 amdgpu_uvd_note_usage(adev);
289 294
@@ -293,8 +298,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
293 continue; 298 continue;
294 } 299 }
295 300
296 amdgpu_fence_wait(fence, false); 301 fence_wait(fence, false);
297 amdgpu_fence_unref(&fence); 302 fence_put(fence);
298 303
299 adev->uvd.filp[i] = NULL; 304 adev->uvd.filp[i] = NULL;
300 atomic_set(&adev->uvd.handles[i], 0); 305 atomic_set(&adev->uvd.handles[i], 0);
@@ -375,6 +380,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
375 unsigned fs_in_mb = width_in_mb * height_in_mb; 380 unsigned fs_in_mb = width_in_mb * height_in_mb;
376 381
377 unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; 382 unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
383 unsigned min_ctx_size = 0;
378 384
379 image_size = width * height; 385 image_size = width * height;
380 image_size += image_size / 2; 386 image_size += image_size / 2;
@@ -466,6 +472,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
466 472
467 num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2; 473 num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
468 min_dpb_size = image_size * num_dpb_buffer; 474 min_dpb_size = image_size * num_dpb_buffer;
475 min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
476 * 16 * num_dpb_buffer + 52 * 1024;
469 break; 477 break;
470 478
471 default: 479 default:
@@ -486,6 +494,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
486 494
487 buf_sizes[0x1] = dpb_size; 495 buf_sizes[0x1] = dpb_size;
488 buf_sizes[0x2] = image_size; 496 buf_sizes[0x2] = image_size;
497 buf_sizes[0x4] = min_ctx_size;
489 return 0; 498 return 0;
490} 499}
491 500
@@ -504,28 +513,25 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
504{ 513{
505 struct amdgpu_device *adev = ctx->parser->adev; 514 struct amdgpu_device *adev = ctx->parser->adev;
506 int32_t *msg, msg_type, handle; 515 int32_t *msg, msg_type, handle;
507 struct fence *f;
508 void *ptr; 516 void *ptr;
509 517 long r;
510 int i, r; 518 int i;
511 519
512 if (offset & 0x3F) { 520 if (offset & 0x3F) {
513 DRM_ERROR("UVD messages must be 64 byte aligned!\n"); 521 DRM_ERROR("UVD messages must be 64 byte aligned!\n");
514 return -EINVAL; 522 return -EINVAL;
515 } 523 }
516 524
517 f = reservation_object_get_excl(bo->tbo.resv); 525 r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
518 if (f) { 526 MAX_SCHEDULE_TIMEOUT);
519 r = amdgpu_fence_wait((struct amdgpu_fence *)f, false); 527 if (r < 0) {
520 if (r) { 528 DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
521 DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); 529 return r;
522 return r;
523 }
524 } 530 }
525 531
526 r = amdgpu_bo_kmap(bo, &ptr); 532 r = amdgpu_bo_kmap(bo, &ptr);
527 if (r) { 533 if (r) {
528 DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); 534 DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
529 return r; 535 return r;
530 } 536 }
531 537
@@ -628,6 +634,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
628 return -EINVAL; 634 return -EINVAL;
629 } 635 }
630 636
637 } else if (cmd == 0x206) {
638 if ((end - start) < ctx->buf_sizes[4]) {
 639			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
640 (unsigned)(end - start),
641 ctx->buf_sizes[4]);
642 return -EINVAL;
643 }
631 } else if ((cmd != 0x100) && (cmd != 0x204)) { 644 } else if ((cmd != 0x100) && (cmd != 0x204)) {
632 DRM_ERROR("invalid UVD command %X!\n", cmd); 645 DRM_ERROR("invalid UVD command %X!\n", cmd);
633 return -EINVAL; 646 return -EINVAL;
@@ -755,9 +768,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
755 struct amdgpu_uvd_cs_ctx ctx = {}; 768 struct amdgpu_uvd_cs_ctx ctx = {};
756 unsigned buf_sizes[] = { 769 unsigned buf_sizes[] = {
757 [0x00000000] = 2048, 770 [0x00000000] = 2048,
758 [0x00000001] = 32 * 1024 * 1024, 771 [0x00000001] = 0xFFFFFFFF,
759 [0x00000002] = 2048 * 1152 * 3, 772 [0x00000002] = 0xFFFFFFFF,
760 [0x00000003] = 2048, 773 [0x00000003] = 2048,
774 [0x00000004] = 0xFFFFFFFF,
761 }; 775 };
762 struct amdgpu_ib *ib = &parser->ibs[ib_idx]; 776 struct amdgpu_ib *ib = &parser->ibs[ib_idx];
763 int r; 777 int r;
@@ -792,14 +806,24 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
792 return 0; 806 return 0;
793} 807}
794 808
809static int amdgpu_uvd_free_job(
810 struct amdgpu_cs_parser *sched_job)
811{
812 amdgpu_ib_free(sched_job->adev, sched_job->ibs);
813 kfree(sched_job->ibs);
814 return 0;
815}
816
795static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, 817static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
796 struct amdgpu_bo *bo, 818 struct amdgpu_bo *bo,
797 struct amdgpu_fence **fence) 819 struct fence **fence)
798{ 820{
799 struct ttm_validate_buffer tv; 821 struct ttm_validate_buffer tv;
800 struct ww_acquire_ctx ticket; 822 struct ww_acquire_ctx ticket;
801 struct list_head head; 823 struct list_head head;
802 struct amdgpu_ib ib; 824 struct amdgpu_ib *ib = NULL;
825 struct fence *f = NULL;
826 struct amdgpu_device *adev = ring->adev;
803 uint64_t addr; 827 uint64_t addr;
804 int i, r; 828 int i, r;
805 829
@@ -821,34 +845,49 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
821 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 845 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
822 if (r) 846 if (r)
823 goto err; 847 goto err;
824 848 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
825 r = amdgpu_ib_get(ring, NULL, 64, &ib); 849 if (!ib) {
826 if (r) 850 r = -ENOMEM;
827 goto err; 851 goto err;
852 }
853 r = amdgpu_ib_get(ring, NULL, 64, ib);
854 if (r)
855 goto err1;
828 856
829 addr = amdgpu_bo_gpu_offset(bo); 857 addr = amdgpu_bo_gpu_offset(bo);
830 ib.ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0); 858 ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
831 ib.ptr[1] = addr; 859 ib->ptr[1] = addr;
832 ib.ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0); 860 ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
833 ib.ptr[3] = addr >> 32; 861 ib->ptr[3] = addr >> 32;
834 ib.ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0); 862 ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
835 ib.ptr[5] = 0; 863 ib->ptr[5] = 0;
836 for (i = 6; i < 16; ++i) 864 for (i = 6; i < 16; ++i)
837 ib.ptr[i] = PACKET2(0); 865 ib->ptr[i] = PACKET2(0);
838 ib.length_dw = 16; 866 ib->length_dw = 16;
839 867
840 r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); 868 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
869 &amdgpu_uvd_free_job,
870 AMDGPU_FENCE_OWNER_UNDEFINED,
871 &f);
841 if (r) 872 if (r)
842 goto err; 873 goto err2;
843 ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
844 874
845 if (fence) 875 ttm_eu_fence_buffer_objects(&ticket, &head, f);
846 *fence = amdgpu_fence_ref(ib.fence);
847 876
848 amdgpu_ib_free(ring->adev, &ib); 877 if (fence)
878 *fence = fence_get(f);
849 amdgpu_bo_unref(&bo); 879 amdgpu_bo_unref(&bo);
850 return 0; 880 fence_put(f);
881 if (amdgpu_enable_scheduler)
882 return 0;
851 883
884 amdgpu_ib_free(ring->adev, ib);
885 kfree(ib);
886 return 0;
887err2:
888 amdgpu_ib_free(ring->adev, ib);
889err1:
890 kfree(ib);
852err: 891err:
853 ttm_eu_backoff_reservation(&ticket, &head); 892 ttm_eu_backoff_reservation(&ticket, &head);
854 return r; 893 return r;
@@ -858,7 +897,7 @@ err:
858   crash the vcpu so just try to emit a dummy create/destroy msg to 897   crash the vcpu so just try to emit a dummy create/destroy msg to
859 avoid this */ 898 avoid this */
860int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, 899int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
861 struct amdgpu_fence **fence) 900 struct fence **fence)
862{ 901{
863 struct amdgpu_device *adev = ring->adev; 902 struct amdgpu_device *adev = ring->adev;
864 struct amdgpu_bo *bo; 903 struct amdgpu_bo *bo;
@@ -905,7 +944,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
905} 944}
906 945
907int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 946int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
908 struct amdgpu_fence **fence) 947 struct fence **fence)
909{ 948{
910 struct amdgpu_device *adev = ring->adev; 949 struct amdgpu_device *adev = ring->adev;
911 struct amdgpu_bo *bo; 950 struct amdgpu_bo *bo;
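
The HEVC decode path above adds a context-buffer minimum, ((width + 255) / 16) * ((height + 255) / 16) * 16 * num_dpb_buffer + 52 KiB, later enforced against command 0x206. A quick standalone evaluation of that formula (the stream parameters below are made up for illustration):

    #include <stdio.h>

    /* formula from the amdgpu_uvd_cs_msg_decode() hunk above */
    static unsigned min_ctx_size(unsigned width, unsigned height,
                                 unsigned num_dpb_buffer)
    {
        return ((width + 255) / 16) * ((height + 255) / 16)
               * 16 * num_dpb_buffer + 52 * 1024;
    }

    int main(void)
    {
        /* e.g. a 3840x2160 stream with 6 DPB buffers (illustrative) */
        unsigned size = min_ctx_size(3840, 2160, 6);

        printf("min_ctx_size = %u bytes (~%u KiB)\n", size, size / 1024);
        return 0;
    }
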
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 2255aa710e33..1724c2c86151 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
29int amdgpu_uvd_suspend(struct amdgpu_device *adev); 29int amdgpu_uvd_suspend(struct amdgpu_device *adev);
30int amdgpu_uvd_resume(struct amdgpu_device *adev); 30int amdgpu_uvd_resume(struct amdgpu_device *adev);
31int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, 31int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
32 struct amdgpu_fence **fence); 32 struct fence **fence);
33int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 33int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
34 struct amdgpu_fence **fence); 34 struct fence **fence);
35void amdgpu_uvd_free_handles(struct amdgpu_device *adev, 35void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
36 struct drm_file *filp); 36 struct drm_file *filp);
37int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); 37int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index d3ca73090e39..33ee6ae28f37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -48,6 +48,7 @@
48#endif 48#endif
49#define FIRMWARE_TONGA "amdgpu/tonga_vce.bin" 49#define FIRMWARE_TONGA "amdgpu/tonga_vce.bin"
50#define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin" 50#define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin"
51#define FIRMWARE_FIJI "amdgpu/fiji_vce.bin"
51 52
52#ifdef CONFIG_DRM_AMDGPU_CIK 53#ifdef CONFIG_DRM_AMDGPU_CIK
53MODULE_FIRMWARE(FIRMWARE_BONAIRE); 54MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -58,6 +59,7 @@ MODULE_FIRMWARE(FIRMWARE_MULLINS);
58#endif 59#endif
59MODULE_FIRMWARE(FIRMWARE_TONGA); 60MODULE_FIRMWARE(FIRMWARE_TONGA);
60MODULE_FIRMWARE(FIRMWARE_CARRIZO); 61MODULE_FIRMWARE(FIRMWARE_CARRIZO);
62MODULE_FIRMWARE(FIRMWARE_FIJI);
61 63
62static void amdgpu_vce_idle_work_handler(struct work_struct *work); 64static void amdgpu_vce_idle_work_handler(struct work_struct *work);
63 65
@@ -101,6 +103,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
101 case CHIP_CARRIZO: 103 case CHIP_CARRIZO:
102 fw_name = FIRMWARE_CARRIZO; 104 fw_name = FIRMWARE_CARRIZO;
103 break; 105 break;
106 case CHIP_FIJI:
107 fw_name = FIRMWARE_FIJI;
108 break;
104 109
105 default: 110 default:
106 return -EINVAL; 111 return -EINVAL;
@@ -334,6 +339,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
334 } 339 }
335} 340}
336 341
342static int amdgpu_vce_free_job(
343 struct amdgpu_cs_parser *sched_job)
344{
345 amdgpu_ib_free(sched_job->adev, sched_job->ibs);
346 kfree(sched_job->ibs);
347 return 0;
348}
349
337/** 350/**
338 * amdgpu_vce_get_create_msg - generate a VCE create msg 351 * amdgpu_vce_get_create_msg - generate a VCE create msg
339 * 352 *
@@ -345,59 +358,69 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
345 * Open up a stream for HW test 358 * Open up a stream for HW test
346 */ 359 */
347int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, 360int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
348 struct amdgpu_fence **fence) 361 struct fence **fence)
349{ 362{
350 const unsigned ib_size_dw = 1024; 363 const unsigned ib_size_dw = 1024;
351 struct amdgpu_ib ib; 364 struct amdgpu_ib *ib = NULL;
365 struct fence *f = NULL;
366 struct amdgpu_device *adev = ring->adev;
352 uint64_t dummy; 367 uint64_t dummy;
353 int i, r; 368 int i, r;
354 369
355 r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib); 370 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
371 if (!ib)
372 return -ENOMEM;
373 r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
356 if (r) { 374 if (r) {
357 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 375 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
376 kfree(ib);
358 return r; 377 return r;
359 } 378 }
360 379
361 dummy = ib.gpu_addr + 1024; 380 dummy = ib->gpu_addr + 1024;
362 381
363	/* stitch together a VCE create msg */ 382	/* stitch together a VCE create msg */
364 ib.length_dw = 0; 383 ib->length_dw = 0;
365 ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ 384 ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
366 ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ 385 ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
367 ib.ptr[ib.length_dw++] = handle; 386 ib->ptr[ib->length_dw++] = handle;
368 387
369 ib.ptr[ib.length_dw++] = 0x00000030; /* len */ 388 ib->ptr[ib->length_dw++] = 0x00000030; /* len */
370 ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */ 389 ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
371 ib.ptr[ib.length_dw++] = 0x00000000; 390 ib->ptr[ib->length_dw++] = 0x00000000;
372 ib.ptr[ib.length_dw++] = 0x00000042; 391 ib->ptr[ib->length_dw++] = 0x00000042;
373 ib.ptr[ib.length_dw++] = 0x0000000a; 392 ib->ptr[ib->length_dw++] = 0x0000000a;
374 ib.ptr[ib.length_dw++] = 0x00000001; 393 ib->ptr[ib->length_dw++] = 0x00000001;
375 ib.ptr[ib.length_dw++] = 0x00000080; 394 ib->ptr[ib->length_dw++] = 0x00000080;
376 ib.ptr[ib.length_dw++] = 0x00000060; 395 ib->ptr[ib->length_dw++] = 0x00000060;
377 ib.ptr[ib.length_dw++] = 0x00000100; 396 ib->ptr[ib->length_dw++] = 0x00000100;
378 ib.ptr[ib.length_dw++] = 0x00000100; 397 ib->ptr[ib->length_dw++] = 0x00000100;
379 ib.ptr[ib.length_dw++] = 0x0000000c; 398 ib->ptr[ib->length_dw++] = 0x0000000c;
380 ib.ptr[ib.length_dw++] = 0x00000000; 399 ib->ptr[ib->length_dw++] = 0x00000000;
381 400
382 ib.ptr[ib.length_dw++] = 0x00000014; /* len */ 401 ib->ptr[ib->length_dw++] = 0x00000014; /* len */
383 ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ 402 ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
384 ib.ptr[ib.length_dw++] = upper_32_bits(dummy); 403 ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
385 ib.ptr[ib.length_dw++] = dummy; 404 ib->ptr[ib->length_dw++] = dummy;
386 ib.ptr[ib.length_dw++] = 0x00000001; 405 ib->ptr[ib->length_dw++] = 0x00000001;
387 406
388 for (i = ib.length_dw; i < ib_size_dw; ++i) 407 for (i = ib->length_dw; i < ib_size_dw; ++i)
389 ib.ptr[i] = 0x0; 408 ib->ptr[i] = 0x0;
390 409
391 r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); 410 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
392 if (r) { 411 &amdgpu_vce_free_job,
393 DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); 412 AMDGPU_FENCE_OWNER_UNDEFINED,
394 } 413 &f);
395 414 if (r)
415 goto err;
396 if (fence) 416 if (fence)
397 *fence = amdgpu_fence_ref(ib.fence); 417 *fence = fence_get(f);
398 418 fence_put(f);
399 amdgpu_ib_free(ring->adev, &ib); 419 if (amdgpu_enable_scheduler)
400 420 return 0;
421err:
422 amdgpu_ib_free(adev, ib);
423 kfree(ib);
401 return r; 424 return r;
402} 425}
403 426
@@ -412,49 +435,59 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
412 * Close up a stream for HW test or if userspace failed to do so 435 * Close up a stream for HW test or if userspace failed to do so
413 */ 436 */
414int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 437int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
415 struct amdgpu_fence **fence) 438 struct fence **fence)
416{ 439{
417 const unsigned ib_size_dw = 1024; 440 const unsigned ib_size_dw = 1024;
418 struct amdgpu_ib ib; 441 struct amdgpu_ib *ib = NULL;
442 struct fence *f = NULL;
443 struct amdgpu_device *adev = ring->adev;
419 uint64_t dummy; 444 uint64_t dummy;
420 int i, r; 445 int i, r;
421 446
422 r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib); 447 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
448 if (!ib)
449 return -ENOMEM;
450
451 r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
423 if (r) { 452 if (r) {
453 kfree(ib);
424 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 454 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
425 return r; 455 return r;
426 } 456 }
427 457
428 dummy = ib.gpu_addr + 1024; 458 dummy = ib->gpu_addr + 1024;
429 459
430	/* stitch together a VCE destroy msg */ 460	/* stitch together a VCE destroy msg */
431 ib.length_dw = 0; 461 ib->length_dw = 0;
432 ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ 462 ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
433 ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ 463 ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
434 ib.ptr[ib.length_dw++] = handle; 464 ib->ptr[ib->length_dw++] = handle;
435 465
436 ib.ptr[ib.length_dw++] = 0x00000014; /* len */ 466 ib->ptr[ib->length_dw++] = 0x00000014; /* len */
437 ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ 467 ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
438 ib.ptr[ib.length_dw++] = upper_32_bits(dummy); 468 ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
439 ib.ptr[ib.length_dw++] = dummy; 469 ib->ptr[ib->length_dw++] = dummy;
440 ib.ptr[ib.length_dw++] = 0x00000001; 470 ib->ptr[ib->length_dw++] = 0x00000001;
441 471
442 ib.ptr[ib.length_dw++] = 0x00000008; /* len */ 472 ib->ptr[ib->length_dw++] = 0x00000008; /* len */
443 ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */ 473 ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */
444 474
445 for (i = ib.length_dw; i < ib_size_dw; ++i) 475 for (i = ib->length_dw; i < ib_size_dw; ++i)
446 ib.ptr[i] = 0x0; 476 ib->ptr[i] = 0x0;
447 477 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
448 r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); 478 &amdgpu_vce_free_job,
449 if (r) { 479 AMDGPU_FENCE_OWNER_UNDEFINED,
450 DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); 480 &f);
451 } 481 if (r)
452 482 goto err;
453 if (fence) 483 if (fence)
454 *fence = amdgpu_fence_ref(ib.fence); 484 *fence = fence_get(f);
455 485 fence_put(f);
456 amdgpu_ib_free(ring->adev, &ib); 486 if (amdgpu_enable_scheduler)
457 487 return 0;
488err:
489 amdgpu_ib_free(adev, ib);
490 kfree(ib);
458 return r; 491 return r;
459} 492}
460 493
@@ -800,7 +833,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
800 */ 833 */
801int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) 834int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
802{ 835{
803 struct amdgpu_fence *fence = NULL; 836 struct fence *fence = NULL;
804 int r; 837 int r;
805 838
806 r = amdgpu_vce_get_create_msg(ring, 1, NULL); 839 r = amdgpu_vce_get_create_msg(ring, 1, NULL);
@@ -815,13 +848,13 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
815 goto error; 848 goto error;
816 } 849 }
817 850
818 r = amdgpu_fence_wait(fence, false); 851 r = fence_wait(fence, false);
819 if (r) { 852 if (r) {
820 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 853 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
821 } else { 854 } else {
822 DRM_INFO("ib test on ring %d succeeded\n", ring->idx); 855 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
823 } 856 }
824error: 857error:
825 amdgpu_fence_unref(&fence); 858 fence_put(fence);
826 return r; 859 return r;
827} 860}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 7ccdb5927da5..ba2da8ee5906 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -29,9 +29,9 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
29int amdgpu_vce_suspend(struct amdgpu_device *adev); 29int amdgpu_vce_suspend(struct amdgpu_device *adev);
30int amdgpu_vce_resume(struct amdgpu_device *adev); 30int amdgpu_vce_resume(struct amdgpu_device *adev);
31int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, 31int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
32 struct amdgpu_fence **fence); 32 struct fence **fence);
33int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 33int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
34 struct amdgpu_fence **fence); 34 struct fence **fence);
35void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); 35void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
36int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); 36int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
37bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring, 37bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9a4e3b63f1cb..a78a206e176e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -127,16 +127,16 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
127/** 127/**
128 * amdgpu_vm_grab_id - allocate the next free VMID 128 * amdgpu_vm_grab_id - allocate the next free VMID
129 * 129 *
130 * @ring: ring we want to submit job to
131 * @vm: vm to allocate id for 130 * @vm: vm to allocate id for
131 * @ring: ring we want to submit job to
132 * @sync: sync object where we add dependencies
132 * 133 *
133 * Allocate an id for the vm (cayman+). 134 * Allocate an id for the vm, adding fences to the sync obj as necessary.
134 * Returns the fence we need to sync to (if any).
135 * 135 *
136 * Global and local mutex must be locked! 136 * Global mutex must be locked!
137 */ 137 */
138struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring, 138int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
139 struct amdgpu_vm *vm) 139 struct amdgpu_sync *sync)
140{ 140{
141 struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {}; 141 struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
142 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; 142 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
@@ -148,7 +148,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
148 /* check if the id is still valid */ 148 /* check if the id is still valid */
149 if (vm_id->id && vm_id->last_id_use && 149 if (vm_id->id && vm_id->last_id_use &&
150 vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) 150 vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
151 return NULL; 151 return 0;
152 152
153 /* we definitely need to flush */ 153 /* we definitely need to flush */
154 vm_id->pd_gpu_addr = ~0ll; 154 vm_id->pd_gpu_addr = ~0ll;
@@ -161,7 +161,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
161 /* found a free one */ 161 /* found a free one */
162 vm_id->id = i; 162 vm_id->id = i;
163 trace_amdgpu_vm_grab_id(i, ring->idx); 163 trace_amdgpu_vm_grab_id(i, ring->idx);
164 return NULL; 164 return 0;
165 } 165 }
166 166
167 if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) { 167 if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
@@ -172,15 +172,19 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
172 172
173 for (i = 0; i < 2; ++i) { 173 for (i = 0; i < 2; ++i) {
174 if (choices[i]) { 174 if (choices[i]) {
175 struct amdgpu_fence *fence;
176
177 fence = adev->vm_manager.active[choices[i]];
175 vm_id->id = choices[i]; 178 vm_id->id = choices[i];
179
176 trace_amdgpu_vm_grab_id(choices[i], ring->idx); 180 trace_amdgpu_vm_grab_id(choices[i], ring->idx);
177 return adev->vm_manager.active[choices[i]]; 181 return amdgpu_sync_fence(ring->adev, sync, &fence->base);
178 } 182 }
179 } 183 }
180 184
181 /* should never happen */ 185 /* should never happen */
182 BUG(); 186 BUG();
183 return NULL; 187 return -EINVAL;
184} 188}
185 189
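The signature change is the key point of this hunk: instead of returning a fence pointer for the caller to wait on, amdgpu_vm_grab_id() now reports success or failure as an int and records any dependency directly in the sync object. A hedged sketch of that control flow, with toy stand-ins for the id-manager state:

#include <stdio.h>

#define TOY_NUM_IDS 4

static int toy_active[TOY_NUM_IDS];	/* 0 == id is free */

/* Returns 0 on success; *dep_needed tells the caller whether it must
 * sync to the id's previous user before submitting. */
static int toy_grab_id(unsigned *id, int *dep_needed)
{
	unsigned i;

	for (i = 1; i < TOY_NUM_IDS; ++i) {
		if (!toy_active[i]) {
			*id = i;	/* found a free one: no dependency */
			*dep_needed = 0;
			return 0;
		}
	}
	/* all busy: reuse one (the driver picks the id whose previous
	 * user finishes earliest) and make the caller sync to it;
	 * negative error codes are reserved for should-never-happen cases */
	*id = 1;
	*dep_needed = 1;
	return 0;
}

int main(void)
{
	unsigned id;
	int dep;

	toy_grab_id(&id, &dep);
	printf("got id %u, dependency needed: %d\n", id, dep);
	return 0;
}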
186/** 190/**
@@ -200,13 +204,15 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
200{ 204{
201 uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); 205 uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
202 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; 206 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
207 struct amdgpu_fence *flushed_updates = vm_id->flushed_updates;
203 208
204 if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates || 209 if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
205 amdgpu_fence_is_earlier(vm_id->flushed_updates, updates)) { 210 (updates && amdgpu_fence_is_earlier(flushed_updates, updates))) {
206 211
207 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); 212 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
208 amdgpu_fence_unref(&vm_id->flushed_updates); 213 vm_id->flushed_updates = amdgpu_fence_ref(
209 vm_id->flushed_updates = amdgpu_fence_ref(updates); 214 amdgpu_fence_later(flushed_updates, updates));
215 amdgpu_fence_unref(&flushed_updates);
210 vm_id->pd_gpu_addr = pd_addr; 216 vm_id->pd_gpu_addr = pd_addr;
211 amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); 217 amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
212 } 218 }
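amdgpu_fence_later(), used above, picks whichever of two fences on the same ring signals later, so the cached flushed_updates reference can only move forward. A small model using sequence numbers, assuming (as the driver does) that both fences belong to one ring and that NULL counts as already signaled:

#include <assert.h>
#include <stdio.h>

struct toy_fence { unsigned ring; unsigned long long seq; };

static struct toy_fence *toy_fence_later(struct toy_fence *a,
					 struct toy_fence *b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	assert(a->ring == b->ring);	/* only comparable on one ring */
	return (a->seq > b->seq) ? a : b;
}

int main(void)
{
	struct toy_fence old = { .ring = 0, .seq = 10 };
	struct toy_fence new = { .ring = 0, .seq = 42 };

	printf("later seq: %llu\n", toy_fence_later(&old, &new)->seq);
	return 0;
}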
@@ -300,6 +306,16 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
300 } 306 }
301} 307}
302 308
309static int amdgpu_vm_free_job(
310 struct amdgpu_cs_parser *sched_job)
311{
312 int i;
313 for (i = 0; i < sched_job->num_ibs; i++)
314 amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
315 kfree(sched_job->ibs);
316 return 0;
317}
318
303/** 319/**
304 * amdgpu_vm_clear_bo - initially clear the page dir/table 320 * amdgpu_vm_clear_bo - initially clear the page dir/table
305 * 321 *
@@ -310,7 +326,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
310 struct amdgpu_bo *bo) 326 struct amdgpu_bo *bo)
311{ 327{
312 struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; 328 struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
313 struct amdgpu_ib ib; 329 struct fence *fence = NULL;
330 struct amdgpu_ib *ib;
314 unsigned entries; 331 unsigned entries;
315 uint64_t addr; 332 uint64_t addr;
316 int r; 333 int r;
@@ -330,24 +347,33 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
330 addr = amdgpu_bo_gpu_offset(bo); 347 addr = amdgpu_bo_gpu_offset(bo);
331 entries = amdgpu_bo_size(bo) / 8; 348 entries = amdgpu_bo_size(bo) / 8;
332 349
333 r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib); 350 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
334 if (r) 351 if (!ib)
335 goto error_unreserve; 352 goto error_unreserve;
336 353
337 ib.length_dw = 0; 354 r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
338
339 amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0);
340 amdgpu_vm_pad_ib(adev, &ib);
341 WARN_ON(ib.length_dw > 64);
342
343 r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
344 if (r) 355 if (r)
345 goto error_free; 356 goto error_free;
346 357
347 amdgpu_bo_fence(bo, ib.fence, true); 358 ib->length_dw = 0;
348 359
360 amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
361 amdgpu_vm_pad_ib(adev, ib);
362 WARN_ON(ib->length_dw > 64);
363 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
364 &amdgpu_vm_free_job,
365 AMDGPU_FENCE_OWNER_VM,
366 &fence);
367 if (!r)
368 amdgpu_bo_fence(bo, fence, true);
369 fence_put(fence);
370 if (amdgpu_enable_scheduler) {
371 amdgpu_bo_unreserve(bo);
372 return 0;
373 }
349error_free: 374error_free:
350 amdgpu_ib_free(adev, &ib); 375 amdgpu_ib_free(adev, ib);
376 kfree(ib);
351 377
352error_unreserve: 378error_unreserve:
353 amdgpu_bo_unreserve(bo); 379 amdgpu_bo_unreserve(bo);
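The reworked error handling keeps the classic kernel pattern of stacked labels: each failure jumps to the label that undoes everything acquired so far, the scheduler-enabled success path returns early after unreserving, and the non-scheduler success path simply falls through the cleanup. A minimal standalone illustration of the ladder, with toy resources standing in for the IB and the BO reservation:

#include <stdio.h>
#include <stdlib.h>

static int toy_setup(void)
{
	char *ib;
	int r = -1;

	ib = malloc(64);		/* stands in for the kzalloc'd IB */
	if (!ib)
		goto error_unreserve;

	if (0 /* submission failed? */)
		goto error_free;

	r = 0;				/* success: fall through the cleanup */

error_free:				/* release in reverse acquisition order */
	free(ib);
error_unreserve:
	puts("unreserve BO");		/* stands in for amdgpu_bo_unreserve() */
	return r;
}

int main(void)
{
	return toy_setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}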
@@ -400,7 +426,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
400 uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; 426 uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
401 uint64_t last_pde = ~0, last_pt = ~0; 427 uint64_t last_pde = ~0, last_pt = ~0;
402 unsigned count = 0, pt_idx, ndw; 428 unsigned count = 0, pt_idx, ndw;
403 struct amdgpu_ib ib; 429 struct amdgpu_ib *ib;
430 struct fence *fence = NULL;
431
404 int r; 432 int r;
405 433
406 /* padding, etc. */ 434 /* padding, etc. */
@@ -413,10 +441,14 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
413 if (ndw > 0xfffff) 441 if (ndw > 0xfffff)
414 return -ENOMEM; 442 return -ENOMEM;
415 443
416 r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib); 444 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
445 if (!ib)
446 return -ENOMEM;
447
448 r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
417 if (r) 449 if (r)
418 return r; 450 return r;
419 ib.length_dw = 0; 451 ib->length_dw = 0;
420 452
421 /* walk over the address space and update the page directory */ 453 /* walk over the address space and update the page directory */
422 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { 454 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -436,7 +468,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
436 ((last_pt + incr * count) != pt)) { 468 ((last_pt + incr * count) != pt)) {
437 469
438 if (count) { 470 if (count) {
439 amdgpu_vm_update_pages(adev, &ib, last_pde, 471 amdgpu_vm_update_pages(adev, ib, last_pde,
440 last_pt, count, incr, 472 last_pt, count, incr,
441 AMDGPU_PTE_VALID, 0); 473 AMDGPU_PTE_VALID, 0);
442 } 474 }
@@ -450,23 +482,37 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
450 } 482 }
451 483
452 if (count) 484 if (count)
453 amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count, 485 amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
454 incr, AMDGPU_PTE_VALID, 0); 486 incr, AMDGPU_PTE_VALID, 0);
455 487
456 if (ib.length_dw != 0) { 488 if (ib->length_dw != 0) {
457 amdgpu_vm_pad_ib(adev, &ib); 489 amdgpu_vm_pad_ib(adev, ib);
458 amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); 490 amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
459 WARN_ON(ib.length_dw > ndw); 491 WARN_ON(ib->length_dw > ndw);
460 r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM); 492 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
461 if (r) { 493 &amdgpu_vm_free_job,
462 amdgpu_ib_free(adev, &ib); 494 AMDGPU_FENCE_OWNER_VM,
463 return r; 495 &fence);
464 } 496 if (r)
465 amdgpu_bo_fence(pd, ib.fence, true); 497 goto error_free;
498
499 amdgpu_bo_fence(pd, fence, true);
500 fence_put(vm->page_directory_fence);
501 vm->page_directory_fence = fence_get(fence);
502 fence_put(fence);
503 }
504
505 if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
506 amdgpu_ib_free(adev, ib);
507 kfree(ib);
466 } 508 }
467 amdgpu_ib_free(adev, &ib);
468 509
469 return 0; 510 return 0;
511
512error_free:
513 amdgpu_ib_free(adev, ib);
514 kfree(ib);
515 return r;
470} 516}
471 517
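Worth noting in the hunk above: vm->page_directory_fence is updated with a put-old/get-new sequence, so the VM always holds exactly one reference to the most recent page-directory update. A tiny sketch of that swap idiom (same toy refcounting as the earlier example):

#include <stdlib.h>

struct toy_fence { int refcount; };

static struct toy_fence *get(struct toy_fence *f) { if (f) f->refcount++; return f; }
static void put(struct toy_fence *f) { if (f && --f->refcount == 0) free(f); }

/* Swap a cached reference: drop the old holder, keep the new one. */
static void cache_fence(struct toy_fence **cached, struct toy_fence *latest)
{
	put(*cached);		/* fence_put(vm->page_directory_fence) */
	*cached = get(latest);	/* vm->page_directory_fence = fence_get(fence) */
}

int main(void)
{
	struct toy_fence *cached = NULL;
	struct toy_fence *f = calloc(1, sizeof(*f));

	f->refcount = 1;
	cache_fence(&cached, f);
	put(f);			/* local reference dropped: fence_put(fence) */
	put(cached);		/* teardown, mirrors amdgpu_vm_fini() */
	return 0;
}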
472/** 518/**
@@ -640,7 +686,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
640 */ 686 */
641static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm, 687static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
642 uint64_t start, uint64_t end, 688 uint64_t start, uint64_t end,
643 struct amdgpu_fence *fence) 689 struct fence *fence)
644{ 690{
645 unsigned i; 691 unsigned i;
646 692
@@ -670,12 +716,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
670 struct amdgpu_vm *vm, 716 struct amdgpu_vm *vm,
671 struct amdgpu_bo_va_mapping *mapping, 717 struct amdgpu_bo_va_mapping *mapping,
672 uint64_t addr, uint32_t gtt_flags, 718 uint64_t addr, uint32_t gtt_flags,
673 struct amdgpu_fence **fence) 719 struct fence **fence)
674{ 720{
675 struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; 721 struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
676 unsigned nptes, ncmds, ndw; 722 unsigned nptes, ncmds, ndw;
677 uint32_t flags = gtt_flags; 723 uint32_t flags = gtt_flags;
678 struct amdgpu_ib ib; 724 struct amdgpu_ib *ib;
725 struct fence *f = NULL;
679 int r; 726 int r;
680 727
681 /* normally, bo_va->flags only contains READABLE and WRITEABLE bit go here 728
@@ -722,46 +769,65 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
722 if (ndw > 0xfffff) 769 if (ndw > 0xfffff)
723 return -ENOMEM; 770 return -ENOMEM;
724 771
725 r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib); 772 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
726 if (r) 773 if (!ib)
774 return -ENOMEM;
775
776 r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
777 if (r) {
778 kfree(ib);
727 return r; 779 return r;
728 ib.length_dw = 0; 780 }
781
782 ib->length_dw = 0;
729 783
730 if (!(flags & AMDGPU_PTE_VALID)) { 784 if (!(flags & AMDGPU_PTE_VALID)) {
731 unsigned i; 785 unsigned i;
732 786
733 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 787 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
734 struct amdgpu_fence *f = vm->ids[i].last_id_use; 788 struct amdgpu_fence *f = vm->ids[i].last_id_use;
735 amdgpu_sync_fence(&ib.sync, f); 789 r = amdgpu_sync_fence(adev, &ib->sync, &f->base);
790 if (r)
791 return r;
736 } 792 }
737 } 793 }
738 794
739 r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start, 795 r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
740 mapping->it.last + 1, addr + mapping->offset, 796 mapping->it.last + 1, addr + mapping->offset,
741 flags, gtt_flags); 797 flags, gtt_flags);
742 798
743 if (r) { 799 if (r) {
744 amdgpu_ib_free(adev, &ib); 800 amdgpu_ib_free(adev, ib);
801 kfree(ib);
745 return r; 802 return r;
746 } 803 }
747 804
748 amdgpu_vm_pad_ib(adev, &ib); 805 amdgpu_vm_pad_ib(adev, ib);
749 WARN_ON(ib.length_dw > ndw); 806 WARN_ON(ib->length_dw > ndw);
807 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
808 &amdgpu_vm_free_job,
809 AMDGPU_FENCE_OWNER_VM,
810 &f);
811 if (r)
812 goto error_free;
750 813
751 r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
752 if (r) {
753 amdgpu_ib_free(adev, &ib);
754 return r;
755 }
756 amdgpu_vm_fence_pts(vm, mapping->it.start, 814 amdgpu_vm_fence_pts(vm, mapping->it.start,
757 mapping->it.last + 1, ib.fence); 815 mapping->it.last + 1, f);
758 if (fence) { 816 if (fence) {
759 amdgpu_fence_unref(fence); 817 fence_put(*fence);
760 *fence = amdgpu_fence_ref(ib.fence); 818 *fence = fence_get(f);
819 }
820 fence_put(f);
821 if (!amdgpu_enable_scheduler) {
822 amdgpu_ib_free(adev, ib);
823 kfree(ib);
761 } 824 }
762 amdgpu_ib_free(adev, &ib);
763
764 return 0; 825 return 0;
826
827error_free:
828 amdgpu_ib_free(adev, ib);
829 kfree(ib);
830 return r;
765} 831}
766 832
767/** 833/**
@@ -794,21 +860,25 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
794 addr = 0; 860 addr = 0;
795 } 861 }
796 862
797 if (addr == bo_va->addr)
798 return 0;
799
800 flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); 863 flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
801 864
802 list_for_each_entry(mapping, &bo_va->mappings, list) { 865 spin_lock(&vm->status_lock);
866 if (!list_empty(&bo_va->vm_status))
867 list_splice_init(&bo_va->valids, &bo_va->invalids);
868 spin_unlock(&vm->status_lock);
869
870 list_for_each_entry(mapping, &bo_va->invalids, list) {
803 r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr, 871 r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
804 flags, &bo_va->last_pt_update); 872 flags, &bo_va->last_pt_update);
805 if (r) 873 if (r)
806 return r; 874 return r;
807 } 875 }
808 876
809 bo_va->addr = addr;
810 spin_lock(&vm->status_lock); 877 spin_lock(&vm->status_lock);
878 list_splice_init(&bo_va->invalids, &bo_va->valids);
811 list_del_init(&bo_va->vm_status); 879 list_del_init(&bo_va->vm_status);
880 if (!mem)
881 list_add(&bo_va->vm_status, &vm->cleared);
812 spin_unlock(&vm->status_lock); 882 spin_unlock(&vm->status_lock);
813 883
814 return 0; 884 return 0;
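The per-BO mapping bookkeeping changes shape here: the single mappings list becomes a valids/invalids pair, everything is moved onto invalids before the page tables are rewritten, then moved back once the update succeeds. A hedged sketch of that splice-rewrite-splice cycle using a trivial singly linked list (the kernel uses list_splice_init(), which is O(1) and preserves order; this toy version does not):

#include <stdio.h>
#include <stddef.h>

struct toy_mapping { const char *name; struct toy_mapping *next; };

/* Move every node from src onto dst, leaving src empty. */
static void toy_splice(struct toy_mapping **dst, struct toy_mapping **src)
{
	while (*src) {
		struct toy_mapping *m = *src;
		*src = m->next;
		m->next = *dst;
		*dst = m;
	}
}

int main(void)
{
	struct toy_mapping b = { "B", NULL }, a = { "A", &b };
	struct toy_mapping *valids = &a, *invalids = NULL;

	toy_splice(&invalids, &valids);	/* invalidate everything */
	for (struct toy_mapping *m = invalids; m; m = m->next)
		printf("rewriting PTEs for %s\n", m->name);
	toy_splice(&valids, &invalids);	/* update succeeded */
	return 0;
}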
@@ -861,7 +931,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
861 struct amdgpu_vm *vm, struct amdgpu_sync *sync) 931 struct amdgpu_vm *vm, struct amdgpu_sync *sync)
862{ 932{
863 struct amdgpu_bo_va *bo_va = NULL; 933 struct amdgpu_bo_va *bo_va = NULL;
864 int r; 934 int r = 0;
865 935
866 spin_lock(&vm->status_lock); 936 spin_lock(&vm->status_lock);
867 while (!list_empty(&vm->invalidated)) { 937 while (!list_empty(&vm->invalidated)) {
@@ -878,8 +948,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
878 spin_unlock(&vm->status_lock); 948 spin_unlock(&vm->status_lock);
879 949
880 if (bo_va) 950 if (bo_va)
881 amdgpu_sync_fence(sync, bo_va->last_pt_update); 951 r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
882 return 0; 952
953 return r;
883} 954}
884 955
885/** 956/**
@@ -907,10 +978,10 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
907 } 978 }
908 bo_va->vm = vm; 979 bo_va->vm = vm;
909 bo_va->bo = bo; 980 bo_va->bo = bo;
910 bo_va->addr = 0;
911 bo_va->ref_count = 1; 981 bo_va->ref_count = 1;
912 INIT_LIST_HEAD(&bo_va->bo_list); 982 INIT_LIST_HEAD(&bo_va->bo_list);
913 INIT_LIST_HEAD(&bo_va->mappings); 983 INIT_LIST_HEAD(&bo_va->valids);
984 INIT_LIST_HEAD(&bo_va->invalids);
914 INIT_LIST_HEAD(&bo_va->vm_status); 985 INIT_LIST_HEAD(&bo_va->vm_status);
915 986
916 mutex_lock(&vm->mutex); 987 mutex_lock(&vm->mutex);
@@ -999,12 +1070,10 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
999 mapping->offset = offset; 1070 mapping->offset = offset;
1000 mapping->flags = flags; 1071 mapping->flags = flags;
1001 1072
1002 list_add(&mapping->list, &bo_va->mappings); 1073 list_add(&mapping->list, &bo_va->invalids);
1003 interval_tree_insert(&mapping->it, &vm->va); 1074 interval_tree_insert(&mapping->it, &vm->va);
1004 trace_amdgpu_vm_bo_map(bo_va, mapping); 1075 trace_amdgpu_vm_bo_map(bo_va, mapping);
1005 1076
1006 bo_va->addr = 0;
1007
1008 /* Make sure the page tables are allocated */ 1077 /* Make sure the page tables are allocated */
1009 saddr >>= amdgpu_vm_block_size; 1078 saddr >>= amdgpu_vm_block_size;
1010 eaddr >>= amdgpu_vm_block_size; 1079 eaddr >>= amdgpu_vm_block_size;
@@ -1085,17 +1154,27 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1085{ 1154{
1086 struct amdgpu_bo_va_mapping *mapping; 1155 struct amdgpu_bo_va_mapping *mapping;
1087 struct amdgpu_vm *vm = bo_va->vm; 1156 struct amdgpu_vm *vm = bo_va->vm;
1157 bool valid = true;
1088 1158
1089 saddr /= AMDGPU_GPU_PAGE_SIZE; 1159 saddr /= AMDGPU_GPU_PAGE_SIZE;
1090 1160
1091 list_for_each_entry(mapping, &bo_va->mappings, list) { 1161 list_for_each_entry(mapping, &bo_va->valids, list) {
1092 if (mapping->it.start == saddr) 1162 if (mapping->it.start == saddr)
1093 break; 1163 break;
1094 } 1164 }
1095 1165
1096 if (&mapping->list == &bo_va->mappings) { 1166 if (&mapping->list == &bo_va->valids) {
1097 amdgpu_bo_unreserve(bo_va->bo); 1167 valid = false;
1098 return -ENOENT; 1168
1169 list_for_each_entry(mapping, &bo_va->invalids, list) {
1170 if (mapping->it.start == saddr)
1171 break;
1172 }
1173
1174 if (&mapping->list == &bo_va->invalids) {
1175 amdgpu_bo_unreserve(bo_va->bo);
1176 return -ENOENT;
1177 }
1099 } 1178 }
1100 1179
1101 mutex_lock(&vm->mutex); 1180 mutex_lock(&vm->mutex);
@@ -1103,12 +1182,10 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1103 interval_tree_remove(&mapping->it, &vm->va); 1182 interval_tree_remove(&mapping->it, &vm->va);
1104 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1183 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1105 1184
1106 if (bo_va->addr) { 1185 if (valid)
1107 /* clear the old address */
1108 list_add(&mapping->list, &vm->freed); 1186 list_add(&mapping->list, &vm->freed);
1109 } else { 1187 else
1110 kfree(mapping); 1188 kfree(mapping);
1111 }
1112 mutex_unlock(&vm->mutex); 1189 mutex_unlock(&vm->mutex);
1113 amdgpu_bo_unreserve(bo_va->bo); 1190 amdgpu_bo_unreserve(bo_va->bo);
1114 1191
@@ -1139,16 +1216,19 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1139 list_del(&bo_va->vm_status); 1216 list_del(&bo_va->vm_status);
1140 spin_unlock(&vm->status_lock); 1217 spin_unlock(&vm->status_lock);
1141 1218
1142 list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) { 1219 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1143 list_del(&mapping->list); 1220 list_del(&mapping->list);
1144 interval_tree_remove(&mapping->it, &vm->va); 1221 interval_tree_remove(&mapping->it, &vm->va);
1145 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1222 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1146 if (bo_va->addr) 1223 list_add(&mapping->list, &vm->freed);
1147 list_add(&mapping->list, &vm->freed);
1148 else
1149 kfree(mapping);
1150 } 1224 }
1151 amdgpu_fence_unref(&bo_va->last_pt_update); 1225 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1226 list_del(&mapping->list);
1227 interval_tree_remove(&mapping->it, &vm->va);
1228 kfree(mapping);
1229 }
1230
1231 fence_put(bo_va->last_pt_update);
1152 kfree(bo_va); 1232 kfree(bo_va);
1153 1233
1154 mutex_unlock(&vm->mutex); 1234 mutex_unlock(&vm->mutex);
@@ -1169,12 +1249,10 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1169 struct amdgpu_bo_va *bo_va; 1249 struct amdgpu_bo_va *bo_va;
1170 1250
1171 list_for_each_entry(bo_va, &bo->va, bo_list) { 1251 list_for_each_entry(bo_va, &bo->va, bo_list) {
1172 if (bo_va->addr) { 1252 spin_lock(&bo_va->vm->status_lock);
1173 spin_lock(&bo_va->vm->status_lock); 1253 if (list_empty(&bo_va->vm_status))
1174 list_del(&bo_va->vm_status);
1175 list_add(&bo_va->vm_status, &bo_va->vm->invalidated); 1254 list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1176 spin_unlock(&bo_va->vm->status_lock); 1255 spin_unlock(&bo_va->vm->status_lock);
1177 }
1178 } 1256 }
1179} 1257}
1180 1258
@@ -1202,6 +1280,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1202 vm->va = RB_ROOT; 1280 vm->va = RB_ROOT;
1203 spin_lock_init(&vm->status_lock); 1281 spin_lock_init(&vm->status_lock);
1204 INIT_LIST_HEAD(&vm->invalidated); 1282 INIT_LIST_HEAD(&vm->invalidated);
1283 INIT_LIST_HEAD(&vm->cleared);
1205 INIT_LIST_HEAD(&vm->freed); 1284 INIT_LIST_HEAD(&vm->freed);
1206 1285
1207 pd_size = amdgpu_vm_directory_size(adev); 1286 pd_size = amdgpu_vm_directory_size(adev);
@@ -1215,6 +1294,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1215 return -ENOMEM; 1294 return -ENOMEM;
1216 } 1295 }
1217 1296
1297 vm->page_directory_fence = NULL;
1298
1218 r = amdgpu_bo_create(adev, pd_size, align, true, 1299 r = amdgpu_bo_create(adev, pd_size, align, true,
1219 AMDGPU_GEM_DOMAIN_VRAM, 0, 1300 AMDGPU_GEM_DOMAIN_VRAM, 0,
1220 NULL, &vm->page_directory); 1301 NULL, &vm->page_directory);
@@ -1263,6 +1344,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1263 kfree(vm->page_tables); 1344 kfree(vm->page_tables);
1264 1345
1265 amdgpu_bo_unref(&vm->page_directory); 1346 amdgpu_bo_unref(&vm->page_directory);
1347 fence_put(vm->page_directory_fence);
1266 1348
1267 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1349 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1268 amdgpu_fence_unref(&vm->ids[i].flushed_updates); 1350 amdgpu_fence_unref(&vm->ids[i].flushed_updates);
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index ae8caca61e04..cd6edc40c9cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -812,7 +812,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
812 else 812 else
813 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; 813 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
814 814
815 if ((adev->flags & AMDGPU_IS_APU) && 815 if ((adev->flags & AMD_IS_APU) &&
816 (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { 816 (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
817 if (is_dp || 817 if (is_dp ||
818 !amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) { 818 !amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index b3b66a0d5ff7..4b6ce74753cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -838,7 +838,7 @@ static u32 cik_get_xclk(struct amdgpu_device *adev)
838{ 838{
839 u32 reference_clock = adev->clock.spll.reference_freq; 839 u32 reference_clock = adev->clock.spll.reference_freq;
840 840
841 if (adev->flags & AMDGPU_IS_APU) { 841 if (adev->flags & AMD_IS_APU) {
842 if (RREG32_SMC(ixGENERAL_PWRMGT) & GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK) 842 if (RREG32_SMC(ixGENERAL_PWRMGT) & GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK)
843 return reference_clock / 2; 843 return reference_clock / 2;
844 } else { 844 } else {
@@ -1235,7 +1235,7 @@ static void cik_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
1235 if (reset_mask & AMDGPU_RESET_VMC) 1235 if (reset_mask & AMDGPU_RESET_VMC)
1236 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK; 1236 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK;
1237 1237
1238 if (!(adev->flags & AMDGPU_IS_APU)) { 1238 if (!(adev->flags & AMD_IS_APU)) {
1239 if (reset_mask & AMDGPU_RESET_MC) 1239 if (reset_mask & AMDGPU_RESET_MC)
1240 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK; 1240 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK;
1241 } 1241 }
@@ -1411,7 +1411,7 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
1411 dev_warn(adev->dev, "Wait for MC idle timed out !\n"); 1411 dev_warn(adev->dev, "Wait for MC idle timed out !\n");
1412 } 1412 }
1413 1413
1414 if (adev->flags & AMDGPU_IS_APU) 1414 if (adev->flags & AMD_IS_APU)
1415 kv_save_regs_for_reset(adev, &kv_save); 1415 kv_save_regs_for_reset(adev, &kv_save);
1416 1416
1417 /* disable BM */ 1417 /* disable BM */
@@ -1429,7 +1429,7 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
1429 } 1429 }
1430 1430
1431 /* does asic init need to be run first??? */ 1431 /* does asic init need to be run first??? */
1432 if (adev->flags & AMDGPU_IS_APU) 1432 if (adev->flags & AMD_IS_APU)
1433 kv_restore_regs_for_reset(adev, &kv_save); 1433 kv_restore_regs_for_reset(adev, &kv_save);
1434} 1434}
1435 1435
@@ -1570,7 +1570,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
1570 if (amdgpu_pcie_gen2 == 0) 1570 if (amdgpu_pcie_gen2 == 0)
1571 return; 1571 return;
1572 1572
1573 if (adev->flags & AMDGPU_IS_APU) 1573 if (adev->flags & AMD_IS_APU)
1574 return; 1574 return;
1575 1575
1576 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 1576 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
@@ -1730,7 +1730,7 @@ static void cik_program_aspm(struct amdgpu_device *adev)
1730 return; 1730 return;
1731 1731
1732 /* XXX double check APUs */ 1732 /* XXX double check APUs */
1733 if (adev->flags & AMDGPU_IS_APU) 1733 if (adev->flags & AMD_IS_APU)
1734 return; 1734 return;
1735 1735
1736 orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL); 1736 orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 15df46c93f0a..2b4242b39b0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -614,6 +614,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
614{ 614{
615 struct amdgpu_device *adev = ring->adev; 615 struct amdgpu_device *adev = ring->adev;
616 struct amdgpu_ib ib; 616 struct amdgpu_ib ib;
617 struct fence *f = NULL;
617 unsigned i; 618 unsigned i;
618 unsigned index; 619 unsigned index;
619 int r; 620 int r;
@@ -629,12 +630,10 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
629 gpu_addr = adev->wb.gpu_addr + (index * 4); 630 gpu_addr = adev->wb.gpu_addr + (index * 4);
630 tmp = 0xCAFEDEAD; 631 tmp = 0xCAFEDEAD;
631 adev->wb.wb[index] = cpu_to_le32(tmp); 632 adev->wb.wb[index] = cpu_to_le32(tmp);
632
633 r = amdgpu_ib_get(ring, NULL, 256, &ib); 633 r = amdgpu_ib_get(ring, NULL, 256, &ib);
634 if (r) { 634 if (r) {
635 amdgpu_wb_free(adev, index);
636 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 635 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
637 return r; 636 goto err0;
638 } 637 }
639 638
640 ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); 639 ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
@@ -643,20 +642,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
643 ib.ptr[3] = 1; 642 ib.ptr[3] = 1;
644 ib.ptr[4] = 0xDEADBEEF; 643 ib.ptr[4] = 0xDEADBEEF;
645 ib.length_dw = 5; 644 ib.length_dw = 5;
645 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
646 AMDGPU_FENCE_OWNER_UNDEFINED,
647 &f);
648 if (r)
649 goto err1;
646 650
647 r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); 651 r = fence_wait(f, false);
648 if (r) { 652 if (r) {
649 amdgpu_ib_free(adev, &ib);
650 amdgpu_wb_free(adev, index);
651 DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
652 return r;
653 }
654 r = amdgpu_fence_wait(ib.fence, false);
655 if (r) {
656 amdgpu_ib_free(adev, &ib);
657 amdgpu_wb_free(adev, index);
658 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 653 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
659 return r; 654 goto err1;
660 } 655 }
661 for (i = 0; i < adev->usec_timeout; i++) { 656 for (i = 0; i < adev->usec_timeout; i++) {
662 tmp = le32_to_cpu(adev->wb.wb[index]); 657 tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -666,12 +661,17 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
666 } 661 }
667 if (i < adev->usec_timeout) { 662 if (i < adev->usec_timeout) {
668 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", 663 DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
669 ib.fence->ring->idx, i); 664 ring->idx, i);
665 goto err1;
670 } else { 666 } else {
671 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); 667 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
672 r = -EINVAL; 668 r = -EINVAL;
673 } 669 }
670
671err1:
672 fence_put(f);
674 amdgpu_ib_free(adev, &ib); 673 amdgpu_ib_free(adev, &ib);
674err0:
675 amdgpu_wb_free(adev, index); 675 amdgpu_wb_free(adev, index);
676 return r; 676 return r;
677} 677}
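The IB test itself is the usual write-back pattern: seed a word with 0xCAFEDEAD, submit an IB that overwrites it with 0xDEADBEEF, then poll with a timeout until the new value lands. A standalone model of the polling loop, with a plain variable standing in for the write-back page and a synchronous call standing in for the GPU:

#include <stdio.h>

#define TOY_TIMEOUT 1000000

static volatile unsigned toy_wb = 0xCAFEDEAD;	/* seeded before submit */

static void toy_fake_gpu_write(void)	/* what the submitted IB does */
{
	toy_wb = 0xDEADBEEF;
}

int main(void)
{
	unsigned i, tmp = 0;

	toy_fake_gpu_write();	/* in the driver this runs asynchronously */
	for (i = 0; i < TOY_TIMEOUT; i++) {
		tmp = toy_wb;
		if (tmp == 0xDEADBEEF)
			break;
		/* the kernel loop delays 1 usec per iteration here */
	}
	if (i < TOY_TIMEOUT)
		printf("ib test succeeded in %u usecs\n", i);
	else
		printf("ib test failed (0x%08X)\n", tmp);
	return 0;
}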
@@ -1404,5 +1404,6 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
1404 if (adev->vm_manager.vm_pte_funcs == NULL) { 1404 if (adev->vm_manager.vm_pte_funcs == NULL) {
1405 adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs; 1405 adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
1406 adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; 1406 adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
1407 adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
1407 } 1408 }
1408} 1409}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index e70a26f587a0..4b255ac3043c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -126,9 +126,31 @@ static const u32 tonga_mgcg_cgcg_init[] =
126 mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, 126 mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
127}; 127};
128 128
129static const u32 golden_settings_fiji_a10[] =
130{
131 mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
132 mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
133 mmFBC_MISC, 0x1f311fff, 0x12300000,
134 mmHDMI_CONTROL, 0x31000111, 0x00000011,
135};
136
137static const u32 fiji_mgcg_cgcg_init[] =
138{
139 mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
140 mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
141};
142
129static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev) 143static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
130{ 144{
131 switch (adev->asic_type) { 145 switch (adev->asic_type) {
146 case CHIP_FIJI:
147 amdgpu_program_register_sequence(adev,
148 fiji_mgcg_cgcg_init,
149 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
150 amdgpu_program_register_sequence(adev,
151 golden_settings_fiji_a10,
152 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
153 break;
132 case CHIP_TONGA: 154 case CHIP_TONGA:
133 amdgpu_program_register_sequence(adev, 155 amdgpu_program_register_sequence(adev,
134 tonga_mgcg_cgcg_init, 156 tonga_mgcg_cgcg_init,
@@ -803,11 +825,11 @@ static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev,
803 buffer_alloc = 2; 825 buffer_alloc = 2;
804 } else if (mode->crtc_hdisplay < 4096) { 826 } else if (mode->crtc_hdisplay < 4096) {
805 mem_cfg = 0; 827 mem_cfg = 0;
806 buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; 828 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
807 } else { 829 } else {
808 DRM_DEBUG_KMS("Mode too big for LB!\n"); 830 DRM_DEBUG_KMS("Mode too big for LB!\n");
809 mem_cfg = 0; 831 mem_cfg = 0;
810 buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; 832 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
811 } 833 }
812 } else { 834 } else {
813 mem_cfg = 1; 835 mem_cfg = 1;
@@ -2888,6 +2910,7 @@ static int dce_v10_0_early_init(void *handle)
2888 dce_v10_0_set_irq_funcs(adev); 2910 dce_v10_0_set_irq_funcs(adev);
2889 2911
2890 switch (adev->asic_type) { 2912 switch (adev->asic_type) {
2913 case CHIP_FIJI:
2891 case CHIP_TONGA: 2914 case CHIP_TONGA:
2892 adev->mode_info.num_crtc = 6; /* XXX 7??? */ 2915 adev->mode_info.num_crtc = 6; /* XXX 7??? */
2893 adev->mode_info.num_hpd = 6; 2916 adev->mode_info.num_hpd = 6;
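The fiji tables added above follow the usual amdgpu golden-register format of (register, and_mask, or_mask) triples, which amdgpu_program_register_sequence() applies as a read-modify-write: clear the and_mask bits, set the or_mask bits. A sketch of that application step over a plain array in place of MMIO; the exact helper semantics here are my reading of the amdgpu code, so treat the details as an assumption:

#include <stdio.h>

static unsigned reg_file[4];	/* stands in for MMIO space */

static unsigned rreg(unsigned reg) { return reg_file[reg]; }
static void wreg(unsigned reg, unsigned val) { reg_file[reg] = val; }

/* registers[] holds (reg, and_mask, or_mask) triples */
static void program_register_sequence(const unsigned *registers, unsigned n)
{
	unsigned i;

	for (i = 0; i < n; i += 3) {
		unsigned reg = registers[i];
		unsigned and_mask = registers[i + 1];
		unsigned or_mask = registers[i + 2];
		unsigned tmp = rreg(reg);

		tmp &= ~and_mask;	/* clear the masked field... */
		tmp |= or_mask;		/* ...then set the golden value */
		wreg(reg, tmp);
	}
}

int main(void)
{
	/* mirrors e.g. { mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070 } */
	const unsigned golden[] = { 2, 0x000000f0, 0x00000070 };

	reg_file[2] = 0x000000a5;
	program_register_sequence(golden, 3);
	printf("0x%08X\n", reg_file[2]);	/* -> 0x00000075 */
	return 0;
}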
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index dcb402ee048a..70eee807421f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -801,11 +801,11 @@ static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
801 buffer_alloc = 2; 801 buffer_alloc = 2;
802 } else if (mode->crtc_hdisplay < 4096) { 802 } else if (mode->crtc_hdisplay < 4096) {
803 mem_cfg = 0; 803 mem_cfg = 0;
804 buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; 804 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
805 } else { 805 } else {
806 DRM_DEBUG_KMS("Mode too big for LB!\n"); 806 DRM_DEBUG_KMS("Mode too big for LB!\n");
807 mem_cfg = 0; 807 mem_cfg = 0;
808 buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; 808 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
809 } 809 }
810 } else { 810 } else {
811 mem_cfg = 1; 811 mem_cfg = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index cc050a329c49..c86911c2ea2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -770,11 +770,11 @@ static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
770 buffer_alloc = 2; 770 buffer_alloc = 2;
771 } else if (mode->crtc_hdisplay < 4096) { 771 } else if (mode->crtc_hdisplay < 4096) {
772 tmp = 0; 772 tmp = 0;
773 buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; 773 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
774 } else { 774 } else {
775 DRM_DEBUG_KMS("Mode too big for LB!\n"); 775 DRM_DEBUG_KMS("Mode too big for LB!\n");
776 tmp = 0; 776 tmp = 0;
777 buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; 777 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
778 } 778 }
779 } else { 779 } else {
780 tmp = 1; 780 tmp = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
new file mode 100644
index 000000000000..8f9845d9a986
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
@@ -0,0 +1,181 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include "drmP.h"
26#include "amdgpu.h"
27#include "fiji_smumgr.h"
28
29MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
30
31static void fiji_dpm_set_funcs(struct amdgpu_device *adev);
32
33static int fiji_dpm_early_init(void *handle)
34{
35 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
36
37 fiji_dpm_set_funcs(adev);
38
39 return 0;
40}
41
42static int fiji_dpm_init_microcode(struct amdgpu_device *adev)
43{
44 char fw_name[30] = "amdgpu/fiji_smc.bin";
45 int err;
46
47 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
48 if (err)
49 goto out;
50 err = amdgpu_ucode_validate(adev->pm.fw);
51
52out:
53 if (err) {
54 DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
55 release_firmware(adev->pm.fw);
56 adev->pm.fw = NULL;
57 }
58 return err;
59}
60
61static int fiji_dpm_sw_init(void *handle)
62{
63 int ret;
64 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
65
66 ret = fiji_dpm_init_microcode(adev);
67 if (ret)
68 return ret;
69
70 return 0;
71}
72
73static int fiji_dpm_sw_fini(void *handle)
74{
75 return 0;
76}
77
78static int fiji_dpm_hw_init(void *handle)
79{
80 int ret;
81 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
82
83 mutex_lock(&adev->pm.mutex);
84
85 ret = fiji_smu_init(adev);
86 if (ret) {
87 DRM_ERROR("SMU initialization failed\n");
88 goto fail;
89 }
90
91 ret = fiji_smu_start(adev);
92 if (ret) {
93 DRM_ERROR("SMU start failed\n");
94 goto fail;
95 }
96
97 mutex_unlock(&adev->pm.mutex);
98 return 0;
99
100fail:
101 adev->firmware.smu_load = false;
102 mutex_unlock(&adev->pm.mutex);
103 return -EINVAL;
104}
105
106static int fiji_dpm_hw_fini(void *handle)
107{
108 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
109 mutex_lock(&adev->pm.mutex);
110 fiji_smu_fini(adev);
111 mutex_unlock(&adev->pm.mutex);
112 return 0;
113}
114
115static int fiji_dpm_suspend(void *handle)
116{
117 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
118
119 fiji_dpm_hw_fini(adev);
120
121 return 0;
122}
123
124static int fiji_dpm_resume(void *handle)
125{
126 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
127
128 fiji_dpm_hw_init(adev);
129
130 return 0;
131}
132
133static int fiji_dpm_set_clockgating_state(void *handle,
134 enum amd_clockgating_state state)
135{
136 return 0;
137}
138
139static int fiji_dpm_set_powergating_state(void *handle,
140 enum amd_powergating_state state)
141{
142 return 0;
143}
144
145const struct amd_ip_funcs fiji_dpm_ip_funcs = {
146 .early_init = fiji_dpm_early_init,
147 .late_init = NULL,
148 .sw_init = fiji_dpm_sw_init,
149 .sw_fini = fiji_dpm_sw_fini,
150 .hw_init = fiji_dpm_hw_init,
151 .hw_fini = fiji_dpm_hw_fini,
152 .suspend = fiji_dpm_suspend,
153 .resume = fiji_dpm_resume,
154 .is_idle = NULL,
155 .wait_for_idle = NULL,
156 .soft_reset = NULL,
157 .print_status = NULL,
158 .set_clockgating_state = fiji_dpm_set_clockgating_state,
159 .set_powergating_state = fiji_dpm_set_powergating_state,
160};
161
162static const struct amdgpu_dpm_funcs fiji_dpm_funcs = {
163 .get_temperature = NULL,
164 .pre_set_power_state = NULL,
165 .set_power_state = NULL,
166 .post_set_power_state = NULL,
167 .display_configuration_changed = NULL,
168 .get_sclk = NULL,
169 .get_mclk = NULL,
170 .print_power_state = NULL,
171 .debugfs_print_current_performance_level = NULL,
172 .force_performance_level = NULL,
173 .vblank_too_short = NULL,
174 .powergate_uvd = NULL,
175};
176
177static void fiji_dpm_set_funcs(struct amdgpu_device *adev)
178{
179 if (NULL == adev->pm.funcs)
180 adev->pm.funcs = &fiji_dpm_funcs;
181}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h
new file mode 100644
index 000000000000..3c4824082990
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h
@@ -0,0 +1,182 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef FIJI_PP_SMC_H
25#define FIJI_PP_SMC_H
26
27#pragma pack(push, 1)
28
29#define PPSMC_SWSTATE_FLAG_DC 0x01
30#define PPSMC_SWSTATE_FLAG_UVD 0x02
31#define PPSMC_SWSTATE_FLAG_VCE 0x04
32
33#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
34#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
35#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
36
37#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
38#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
39#define PPSMC_SYSTEMFLAG_GDDR5 0x04
40
41#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
42
43#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
44#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
45
46#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
47#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
48
49#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
50#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
51
52#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
53#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
54#define PPSMC_DPM2FLAGS_OCP 0x04
55
56#define PPSMC_DISPLAY_WATERMARK_LOW 0
57#define PPSMC_DISPLAY_WATERMARK_HIGH 1
58
59#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
60#define PPSMC_STATEFLAG_POWERBOOST 0x02
61#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
62#define PPSMC_STATEFLAG_POWERSHIFT 0x08
63#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
64#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
65#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
66
67#define FDO_MODE_HARDWARE 0
68#define FDO_MODE_PIECE_WISE_LINEAR 1
69
70enum FAN_CONTROL {
71 FAN_CONTROL_FUZZY,
72 FAN_CONTROL_TABLE
73};
74
75//Gemini Modes
76#define PPSMC_GeminiModeNone 0 //Single GPU board
77#define PPSMC_GeminiModeMaster 1 //Master GPU on a Gemini board
78#define PPSMC_GeminiModeSlave 2 //Slave GPU on a Gemini board
79
80#define PPSMC_Result_OK ((uint16_t)0x01)
81#define PPSMC_Result_NoMore ((uint16_t)0x02)
82#define PPSMC_Result_NotNow ((uint16_t)0x03)
83#define PPSMC_Result_Failed ((uint16_t)0xFF)
84#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
85#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
86
87typedef uint16_t PPSMC_Result;
88
89#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
90
91#define PPSMC_MSG_Halt ((uint16_t)0x10)
92#define PPSMC_MSG_Resume ((uint16_t)0x11)
93#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
94#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
95#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
96#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
97#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
98#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
99#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
100#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
101#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
102#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
103#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
104#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
105#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
106#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
107#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
108#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
109#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
110#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
111#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
112#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
113#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
114#define PPSMC_CACHistoryStart ((uint16_t)0x57)
115#define PPSMC_CACHistoryStop ((uint16_t)0x58)
116#define PPSMC_TDPClampingActive ((uint16_t)0x59)
117#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
118#define PPSMC_StartFanControl ((uint16_t)0x5B)
119#define PPSMC_StopFanControl ((uint16_t)0x5C)
120#define PPSMC_NoDisplay ((uint16_t)0x5D)
121#define PPSMC_HasDisplay ((uint16_t)0x5E)
122#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
123#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
124#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
125#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
126#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
127#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
128#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
129#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
130#define PPSMC_OCPActive ((uint16_t)0x6C)
131#define PPSMC_OCPInactive ((uint16_t)0x6D)
132#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
133#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
134#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
135#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
136#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
137#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
138#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
139#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
140#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
141#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
142#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
143#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
144#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
145#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
146#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
147#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
148#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
149#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
150#define PPSMC_FlushDataCache ((uint16_t)0x80)
151#define PPSMC_FlushInstrCache ((uint16_t)0x81)
152#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
153#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
154#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
155#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
156#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
157#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
158#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
159#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
160#define PPSMC_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A)
161#define PPSMC_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B)
162#define PPSMC_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C)
163
164#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
165
166#define PPSMC_MSG_Test ((uint16_t)0x100)
167#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t)0x250)
168#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t)0x251)
169#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t)0x252)
170#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t)0x253)
171#define PPSMC_MSG_LoadUcodes ((uint16_t)0x254)
172
173typedef uint16_t PPSMC_Msg;
174
175#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
176#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
177#define PPSMC_EVENT_STATUS_DC 0x00000004
178#define PPSMC_EVENT_STATUS_GPIO17 0x00000008
179
180#pragma pack(pop)
181
182#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
new file mode 100644
index 000000000000..493c8c9c7faa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -0,0 +1,853 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include "drmP.h"
26#include "amdgpu.h"
27#include "fiji_ppsmc.h"
28#include "fiji_smumgr.h"
29#include "smu_ucode_xfer_vi.h"
30#include "amdgpu_ucode.h"
31
32#include "smu/smu_7_1_3_d.h"
33#include "smu/smu_7_1_3_sh_mask.h"
34
35#define FIJI_SMC_SIZE 0x20000
36
37static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
38{
39 uint32_t val;
40
41 if (smc_address & 3)
42 return -EINVAL;
43
44 if ((smc_address + 3) > limit)
45 return -EINVAL;
46
47 WREG32(mmSMC_IND_INDEX_0, smc_address);
48
49 val = RREG32(mmSMC_IND_ACCESS_CNTL);
50 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
51 WREG32(mmSMC_IND_ACCESS_CNTL, val);
52
53 return 0;
54}
55
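fiji_set_smc_sram_address() gates every indirect access on two cheap checks: the address must be dword-aligned and the last byte touched must stay within the limit. The same guards, testable in isolation:

#include <stdio.h>
#include <stdint.h>

static int check_smc_address(uint32_t addr, uint32_t limit)
{
	if (addr & 3)			/* must be 4-byte aligned */
		return -1;		/* the driver returns -EINVAL */
	if (addr + 3 > limit)		/* last byte of the dword in range */
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", check_smc_address(0x100, 0x20000));	/* 0: ok */
	printf("%d\n", check_smc_address(0x101, 0x20000));	/* -1: misaligned */
	printf("%d\n", check_smc_address(0x20000, 0x20000));	/* -1: past the limit */
	return 0;
}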
56static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
57{
58 uint32_t addr;
59 uint32_t data, orig_data;
60 int result = 0;
61 uint32_t extra_shift;
62 unsigned long flags;
63
64 if (smc_start_address & 3)
65 return -EINVAL;
66
67 if ((smc_start_address + byte_count) > limit)
68 return -EINVAL;
69
70 addr = smc_start_address;
71
72 spin_lock_irqsave(&adev->smc_idx_lock, flags);
73 while (byte_count >= 4) {
74 /* Bytes are written into the SMC address space with the MSB first */
75 data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
76
77 result = fiji_set_smc_sram_address(adev, addr, limit);
78
79 if (result)
80 goto out;
81
82 WREG32(mmSMC_IND_DATA_0, data);
83
84 src += 4;
85 byte_count -= 4;
86 addr += 4;
87 }
88
89 if (0 != byte_count) {
90 /* Now write odd bytes left, do a read modify write cycle */
91 data = 0;
92
93 result = fiji_set_smc_sram_address(adev, addr, limit);
94 if (result)
95 goto out;
96
97 orig_data = RREG32(mmSMC_IND_DATA_0);
98 extra_shift = 8 * (4 - byte_count);
99
100 while (byte_count > 0) {
101 data = (data << 8) + *src++;
102 byte_count--;
103 }
104
105 data <<= extra_shift;
106 data |= (orig_data & ~((~0UL) << extra_shift));
107
108 result = fiji_set_smc_sram_address(adev, addr, limit);
109 if (result)
110 goto out;
111
112 WREG32(mmSMC_IND_DATA_0, data);
113 }
114
115out:
116 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
117 return result;
118}
119
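fiji_copy_bytes_to_smc() packs each group of four bytes MSB-first before writing it through the indirect data register, and handles a ragged tail with a read-modify-write. The 32-bit packing and tail masking can be checked in isolation:

#include <stdio.h>
#include <stdint.h>

/* MSB-first: src[0] lands in bits 31..24, matching the SMC layout */
static uint32_t pack_be32(const uint8_t *src)
{
	return ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] << 8) | (uint32_t)src[3];
}

/* Merge 'count' leftover bytes (1..3) over an existing word */
static uint32_t merge_tail(uint32_t orig, const uint8_t *src, unsigned count)
{
	uint32_t data = 0;
	unsigned extra_shift = 8 * (4 - count);

	while (count--)
		data = (data << 8) | *src++;
	data <<= extra_shift;
	return data | (orig & ~((~0UL) << extra_shift));
}

int main(void)
{
	const uint8_t buf[] = { 0xDE, 0xAD, 0xBE, 0xEF, 0x12 };

	printf("0x%08X\n", (unsigned)pack_be32(buf));	/* 0xDEADBEEF */
	printf("0x%08X\n", (unsigned)merge_tail(0xAABBCCDD, buf + 4, 1));
							/* 0x12BBCCDD */
	return 0;
}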
120static int fiji_program_jump_on_start(struct amdgpu_device *adev)
121{
122 static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
123 fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
124
125 return 0;
126}
127
128static bool fiji_is_smc_ram_running(struct amdgpu_device *adev)
129{
130 uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
131 val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
132
133 return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
134}
135
136static int wait_smu_response(struct amdgpu_device *adev)
137{
138 int i;
139 uint32_t val;
140
141 for (i = 0; i < adev->usec_timeout; i++) {
142 val = RREG32(mmSMC_RESP_0);
143 if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
144 break;
145 udelay(1);
146 }
147
148 if (i == adev->usec_timeout)
149 return -EINVAL;
150
151 return 0;
152}
153
154static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev)
155{
156 if (wait_smu_response(adev)) {
157 DRM_ERROR("Failed to send previous message\n");
158 return -EINVAL;
159 }
160
161 WREG32(mmSMC_MSG_ARG_0, 0x20000);
162 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
163
164 if (wait_smu_response(adev)) {
165 DRM_ERROR("Failed to send message\n");
166 return -EINVAL;
167 }
168
169 return 0;
170}
171
172static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
173{
174 if (!fiji_is_smc_ram_running(adev))
175 {
176 return -EINVAL;
177 }
178
179 if (wait_smu_response(adev)) {
180 DRM_ERROR("Failed to send previous message\n");
181 return -EINVAL;
182 }
183
184 WREG32(mmSMC_MESSAGE_0, msg);
185
186 if (wait_smu_response(adev)) {
187 DRM_ERROR("Failed to send message\n");
188 return -EINVAL;
189 }
190
191 return 0;
192}
193
194static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
195 PPSMC_Msg msg)
196{
197 if (wait_smu_response(adev)) {
198 DRM_ERROR("Failed to send previous message\n");
199 return -EINVAL;
200 }
201
202 WREG32(mmSMC_MESSAGE_0, msg);
203
204 return 0;
205}
206
207static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
208 PPSMC_Msg msg,
209 uint32_t parameter)
210{
211 if (!fiji_is_smc_ram_running(adev))
212 return -EINVAL;
213
214 if (wait_smu_response(adev)) {
215 DRM_ERROR("Failed to send previous message\n");
216 return -EINVAL;
217 }
218
219 WREG32(mmSMC_MSG_ARG_0, parameter);
220
221 return fiji_send_msg_to_smc(adev, msg);
222}
223
224static int fiji_send_msg_to_smc_with_parameter_without_waiting(
225 struct amdgpu_device *adev,
226 PPSMC_Msg msg, uint32_t parameter)
227{
228 if (wait_smu_response(adev)) {
229 DRM_ERROR("Failed to send previous message\n");
230 return -EINVAL;
231 }
232
233 WREG32(mmSMC_MSG_ARG_0, parameter);
234
235 return fiji_send_msg_to_smc_without_waiting(adev, msg);
236}
237
238#if 0 /* not used yet */
239static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev)
240{
241 int i;
242 uint32_t val;
243
244 if (!fiji_is_smc_ram_running(adev))
245 return -EINVAL;
246
247 for (i = 0; i < adev->usec_timeout; i++) {
248 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
249 if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
250 break;
251 udelay(1);
252 }
253
254 if (i == adev->usec_timeout)
255 return -EINVAL;
256
257 return 0;
258}
259#endif
260
261static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
262{
263 const struct smc_firmware_header_v1_0 *hdr;
264 uint32_t ucode_size;
265 uint32_t ucode_start_address;
266 const uint8_t *src;
267 uint32_t val;
268 uint32_t byte_count;
269 uint32_t *data;
270 unsigned long flags;
271
272 if (!adev->pm.fw)
273 return -EINVAL;
274
275 hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
276 amdgpu_ucode_print_smc_hdr(&hdr->header);
277
278 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
279 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
280 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
281 src = (const uint8_t *)
282 (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
283
284 if (ucode_size & 3) {
285 DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
286 return -EINVAL;
287 }
288
289 if (ucode_size > FIJI_SMC_SIZE) {
290 DRM_ERROR("SMC address is beyond the SMC RAM area\n");
291 return -EINVAL;
292 }
293
294 spin_lock_irqsave(&adev->smc_idx_lock, flags);
295 WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
296
297 val = RREG32(mmSMC_IND_ACCESS_CNTL);
298 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
299 WREG32(mmSMC_IND_ACCESS_CNTL, val);
300
301 byte_count = ucode_size;
302 data = (uint32_t *)src;
303 for (; byte_count >= 4; data++, byte_count -= 4)
304 WREG32(mmSMC_IND_DATA_0, data[0]);
305
306 val = RREG32(mmSMC_IND_ACCESS_CNTL);
307 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
308 WREG32(mmSMC_IND_ACCESS_CNTL, val);
309 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
310
311 return 0;
312}
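/* The upload above relies on the auto-increment indirect window: with
 * AUTO_INCREMENT_IND_0 set, every WREG32(mmSMC_IND_DATA_0) advances the
 * internal address, so only the start address has to be programmed.
 * The smc_idx_lock serializes this against other users of the indirect
 * index/data register pair.
 */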
313
314#if 0 /* not used yet */
315static int fiji_read_smc_sram_dword(struct amdgpu_device *adev,
316 uint32_t smc_address,
317 uint32_t *value,
318 uint32_t limit)
319{
320 int result;
321 unsigned long flags;
322
323 spin_lock_irqsave(&adev->smc_idx_lock, flags);
324 result = fiji_set_smc_sram_address(adev, smc_address, limit);
325 if (result == 0)
326 *value = RREG32(mmSMC_IND_DATA_0);
327 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
328 return result;
329}
330
331static int fiji_write_smc_sram_dword(struct amdgpu_device *adev,
332 uint32_t smc_address,
333 uint32_t value,
334 uint32_t limit)
335{
336 int result;
337 unsigned long flags;
338
339 spin_lock_irqsave(&adev->smc_idx_lock, flags);
340 result = fiji_set_smc_sram_address(adev, smc_address, limit);
341 if (result == 0)
342 WREG32(mmSMC_IND_DATA_0, value);
343 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
344 return result;
345}
346
347static int fiji_smu_stop_smc(struct amdgpu_device *adev)
348{
349 uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
350 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
351 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
352
353 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
354 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
355 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
356
357 return 0;
358}
359#endif
360
361static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type)
362{
363 switch (fw_type) {
364 case UCODE_ID_SDMA0:
365 return AMDGPU_UCODE_ID_SDMA0;
366 case UCODE_ID_SDMA1:
367 return AMDGPU_UCODE_ID_SDMA1;
368 case UCODE_ID_CP_CE:
369 return AMDGPU_UCODE_ID_CP_CE;
370 case UCODE_ID_CP_PFP:
371 return AMDGPU_UCODE_ID_CP_PFP;
372 case UCODE_ID_CP_ME:
373 return AMDGPU_UCODE_ID_CP_ME;
374 case UCODE_ID_CP_MEC:
375 case UCODE_ID_CP_MEC_JT1:
376 case UCODE_ID_CP_MEC_JT2:
377 return AMDGPU_UCODE_ID_CP_MEC1;
378 case UCODE_ID_RLC_G:
379 return AMDGPU_UCODE_ID_RLC_G;
380 default:
381 DRM_ERROR("ucode type is out of range!\n");
382 return AMDGPU_UCODE_ID_MAXIMUM;
383 }
384}
385
386static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
387 uint32_t fw_type,
388 struct SMU_Entry *entry)
389{
390 enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type);
391 struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
392 const struct gfx_firmware_header_v1_0 *header = NULL;
393 uint64_t gpu_addr;
394 uint32_t data_size;
395
396 if (ucode->fw == NULL)
397 return -EINVAL;
398 gpu_addr = ucode->mc_addr;
399 header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
400 data_size = le32_to_cpu(header->header.ucode_size_bytes);
401
402 if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
403 (fw_type == UCODE_ID_CP_MEC_JT2)) {
404 gpu_addr += le32_to_cpu(header->jt_offset) << 2;
405 data_size = le32_to_cpu(header->jt_size) << 2;
406 }
407
408 entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
409 entry->id = (uint16_t)fw_type;
410 entry->image_addr_high = upper_32_bits(gpu_addr);
411 entry->image_addr_low = lower_32_bits(gpu_addr);
412 entry->meta_data_addr_high = 0;
413 entry->meta_data_addr_low = 0;
414 entry->data_size_byte = data_size;
415 entry->num_register_entries = 0;
416
417 if (fw_type == UCODE_ID_RLC_G)
418 entry->flags = 1;
419 else
420 entry->flags = 0;
421
422 return 0;
423}
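/* For the MEC jump tables only the JT slice of the shared MEC image is
 * described: jt_offset and jt_size are dword counts in the firmware
 * header, hence the << 2 conversions to a byte offset and byte size.
 */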
424
425static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
426{
427 struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv;
428 struct SMU_DRAMData_TOC *toc;
429 uint32_t fw_to_load;
430
431 WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
432
433 fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
434 fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
435
436 toc = (struct SMU_DRAMData_TOC *)private->header;
437 toc->num_entries = 0;
438 toc->structure_version = 1;
439
440 if (!adev->firmware.smu_load)
441 return 0;
442
443 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
444 &toc->entry[toc->num_entries++])) {
445 DRM_ERROR("Failed to get firmware entry for RLC\n");
446 return -EINVAL;
447 }
448
449 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
450 &toc->entry[toc->num_entries++])) {
451 DRM_ERROR("Failed to get firmware entry for CE\n");
452 return -EINVAL;
453 }
454
455 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
456 &toc->entry[toc->num_entries++])) {
457 DRM_ERROR("Failed to get firmware entry for PFP\n");
458 return -EINVAL;
459 }
460
461 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
462 &toc->entry[toc->num_entries++])) {
463 DRM_ERROR("Failed to get firmware entry for ME\n");
464 return -EINVAL;
465 }
466
467 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
468 &toc->entry[toc->num_entries++])) {
469 DRM_ERROR("Failed to get firmware entry for MEC\n");
470 return -EINVAL;
471 }
472
473 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
474 &toc->entry[toc->num_entries++])) {
475 DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
476 return -EINVAL;
477 }
478
479 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
480 &toc->entry[toc->num_entries++])) {
481 DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
482 return -EINVAL;
483 }
484
485 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
486 &toc->entry[toc->num_entries++])) {
487 DRM_ERROR("Failed to get firmware entry for SDMA0\n");
488 return -EINVAL;
489 }
490
491 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
492 &toc->entry[toc->num_entries++])) {
493 DRM_ERROR("Failed to get firmware entry for SDMA1\n");
494 return -EINVAL;
495 }
496
497 fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
498 fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
499
500 fw_to_load = UCODE_ID_RLC_G_MASK |
501 UCODE_ID_SDMA0_MASK |
502 UCODE_ID_SDMA1_MASK |
503 UCODE_ID_CP_CE_MASK |
504 UCODE_ID_CP_ME_MASK |
505 UCODE_ID_CP_PFP_MASK |
506 UCODE_ID_CP_MEC_MASK;
507
508 if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
509 DRM_ERROR("Fail to request SMU load ucode\n");
510 return -EINVAL;
511 }
512
513 return 0;
514}
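/* fw_to_load intentionally carries no JT1/JT2 bits; the jump tables are
 * covered by UCODE_ID_CP_MEC_MASK. SOFT_REGISTERS_TABLE_28 is cleared
 * above before the request goes out and is presumably updated by the
 * SMU as each ucode lands, which is what
 * fiji_smu_check_fw_load_finish() polls below.
 */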
515
516static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
517{
518 switch (fw_type) {
519 case AMDGPU_UCODE_ID_SDMA0:
520 return UCODE_ID_SDMA0_MASK;
521 case AMDGPU_UCODE_ID_SDMA1:
522 return UCODE_ID_SDMA1_MASK;
523 case AMDGPU_UCODE_ID_CP_CE:
524 return UCODE_ID_CP_CE_MASK;
525 case AMDGPU_UCODE_ID_CP_PFP:
526 return UCODE_ID_CP_PFP_MASK;
527 case AMDGPU_UCODE_ID_CP_ME:
528 return UCODE_ID_CP_ME_MASK;
529 case AMDGPU_UCODE_ID_CP_MEC1:
530 case AMDGPU_UCODE_ID_CP_MEC2:
531 return UCODE_ID_CP_MEC_MASK;
533 case AMDGPU_UCODE_ID_RLC_G:
534 return UCODE_ID_RLC_G_MASK;
535 default:
536 DRM_ERROR("ucode type is out of range!\n");
537 return 0;
538 }
539}
540
541static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
542 uint32_t fw_type)
543{
544 uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
545 int i;
546
547 for (i = 0; i < adev->usec_timeout; i++) {
548 if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
549 break;
550 udelay(1);
551 }
552
553 if (i == adev->usec_timeout) {
554 DRM_ERROR("check firmware loading failed\n");
555 return -EINVAL;
556 }
557
558 return 0;
559}
560
561static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev)
562{
563 int result;
564 uint32_t val;
565 int i;
566
567 /* Assert reset */
568 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
569 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
570 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
571
572 result = fiji_smu_upload_firmware_image(adev);
573 if (result)
574 return result;
575
576 /* Clear status */
577 WREG32_SMC(ixSMU_STATUS, 0);
578
579 /* Enable clock */
580 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
581 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
582 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
583
584 /* De-assert reset */
585 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
586 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
587 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
588
589 /* Set SMU Auto Start */
590 val = RREG32_SMC(ixSMU_INPUT_DATA);
591 val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
592 WREG32_SMC(ixSMU_INPUT_DATA, val);
593
594 /* Clear firmware interrupt enable flag */
595 WREG32_SMC(ixFIRMWARE_FLAGS, 0);
596
597 for (i = 0; i < adev->usec_timeout; i++) {
598 val = RREG32_SMC(ixRCU_UC_EVENTS);
599 if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
600 break;
601 udelay(1);
602 }
603
604 if (i == adev->usec_timeout) {
605 DRM_ERROR("Interrupt is not enabled by firmware\n");
606 return -EINVAL;
607 }
608
609 /* Send the Test SMU message with the 0x20000 offset
610 * to trigger SMU start
611 */
612 fiji_send_msg_to_smc_offset(adev);
613 DRM_INFO("[FM]trying to trigger smu start\n");
614 /* Wait for done bit to be set */
615 for (i = 0; i < adev->usec_timeout; i++) {
616 val = RREG32_SMC(ixSMU_STATUS);
617 if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
618 break;
619 udelay(1);
620 }
621
622 if (i == adev->usec_timeout) {
623 DRM_ERROR("Timeout for SMU start\n");
624 return -EINVAL;
625 }
626
627 /* Check pass/failed indicator */
628 val = RREG32_SMC(ixSMU_STATUS);
629 if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
630 DRM_ERROR("SMU Firmware start failed\n");
631 return -EINVAL;
632 }
633 DRM_INFO("[FM]smu started\n");
634 /* Wait for firmware to initialize */
635 for (i = 0; i < adev->usec_timeout; i++) {
636 val = RREG32_SMC(ixFIRMWARE_FLAGS);
637 if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
638 break;
639 udelay(1);
640 }
641
642 if (i == adev->usec_timeout) {
643 DRM_ERROR("SMU firmware initialization failed\n");
644 return -EINVAL;
645 }
646 DRM_INFO("[FM]smu initialized\n");
647
648 return 0;
649}
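/* Protection-mode bring-up, in order: hold the SMC in reset, upload the
 * firmware image, clear SMU_STATUS, ungate the SMC clock, release
 * reset, arm AUTO_START, then send the test message at offset 0x20000
 * and wait for SMU_DONE/SMU_PASS and finally for INTERRUPTS_ENABLED.
 */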
650
651static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
652{
653 int i, result;
654 uint32_t val;
655
656 /* wait for smc boot up */
657 for (i = 0; i < adev->usec_timeout; i++) {
658 val = RREG32_SMC(ixRCU_UC_EVENTS);
659 val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
660 if (val)
661 break;
662 udelay(1);
663 }
664
665 if (i == adev->usec_timeout) {
666 DRM_ERROR("SMC boot sequence is not completed\n");
667 return -EINVAL;
668 }
669
670 /* Clear firmware interrupt enable flag */
671 WREG32_SMC(ixFIRMWARE_FLAGS, 0);
672
673 /* Assert reset */
674 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
675 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
676 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
677
678 result = fiji_smu_upload_firmware_image(adev);
679 if (result)
680 return result;
681
682 /* Set the SMC instruction start point to 0x0 */
683 fiji_program_jump_on_start(adev);
684
685 /* Enable clock */
686 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
687 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
688 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
689
690 /* De-assert reset */
691 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
692 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
693 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
694
695 /* Wait for firmware to initialize */
696 for (i = 0; i < adev->usec_timeout; i++) {
697 val = RREG32_SMC(ixFIRMWARE_FLAGS);
698 if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
699 break;
700 udelay(1);
701 }
702
703 if (i == adev->usec_timeout) {
704 DRM_ERROR("Timeout for SMC firmware initialization\n");
705 return -EINVAL;
706 }
707
708 return 0;
709}
710
711int fiji_smu_start(struct amdgpu_device *adev)
712{
713 int result;
714 uint32_t val;
715
716 if (!fiji_is_smc_ram_running(adev)) {
717 val = RREG32_SMC(ixSMU_FIRMWARE);
718 if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
719 DRM_INFO("[FM]start smu in nonprotection mode\n");
720 result = fiji_smu_start_in_non_protection_mode(adev);
721 if (result)
722 return result;
723 } else {
724 DRM_INFO("[FM]start smu in protection mode\n");
725 result = fiji_smu_start_in_protection_mode(adev);
726 if (result)
727 return result;
728 }
729 }
730
731 return fiji_smu_request_load_fw(adev);
732}
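/* SMU_FIRMWARE.SMU_MODE selects the bring-up path: non-protection mode
 * jumps the freshly uploaded firmware to address 0 directly, protection
 * mode goes through the auto-start handshake above. Either way the
 * actual ucode load is then requested via fiji_smu_request_load_fw().
 */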
733
734static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = {
735 .check_fw_load_finish = fiji_smu_check_fw_load_finish,
736 .request_smu_load_fw = NULL,
737 .request_smu_specific_fw = NULL,
738};
739
740int fiji_smu_init(struct amdgpu_device *adev)
741{
742 struct fiji_smu_private_data *private;
743 uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
744 uint32_t smu_internal_buffer_size = 200 * 4096;
745 struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
746 struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
747 uint64_t mc_addr;
748 void *toc_buf_ptr;
749 void *smu_buf_ptr;
750 int ret;
751
752 private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL);
753 if (!private)
754 return -ENOMEM;
755
756 /* allocate firmware buffers */
757 if (adev->firmware.smu_load)
758 amdgpu_ucode_init_bo(adev);
759
760 adev->smu.priv = private;
761 adev->smu.fw_flags = 0;
762
763 /* Allocate FW image data structure and header buffer */
764 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
765 true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
766 if (ret) {
767 DRM_ERROR("Failed to allocate memory for TOC buffer\n");
768 return -ENOMEM;
769 }
770
771 /* Allocate buffer for SMU internal buffer */
772 ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
773 true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, smu_buf);
774 if (ret) {
775 DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
776 return -ENOMEM;
777 }
778
779 /* Retrieve GPU address for header buffer and internal buffer */
780 ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
781 if (ret) {
782 amdgpu_bo_unref(&adev->smu.toc_buf);
783 DRM_ERROR("Failed to reserve the TOC buffer\n");
784 return -EINVAL;
785 }
786
787 ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
788 if (ret) {
789 amdgpu_bo_unreserve(adev->smu.toc_buf);
790 amdgpu_bo_unref(&adev->smu.toc_buf);
791 DRM_ERROR("Failed to pin the TOC buffer\n");
792 return -EINVAL;
793 }
794
795 ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
796 if (ret) {
797 amdgpu_bo_unreserve(adev->smu.toc_buf);
798 amdgpu_bo_unref(&adev->smu.toc_buf);
799 DRM_ERROR("Failed to map the TOC buffer\n");
800 return -EINVAL;
801 }
802
803 amdgpu_bo_unreserve(adev->smu.toc_buf);
804 private->header_addr_low = lower_32_bits(mc_addr);
805 private->header_addr_high = upper_32_bits(mc_addr);
806 private->header = toc_buf_ptr;
807
808 ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
809 if (ret) {
810 amdgpu_bo_unref(&adev->smu.smu_buf);
811 amdgpu_bo_unref(&adev->smu.toc_buf);
812 DRM_ERROR("Failed to reserve the SMU internal buffer\n");
813 return -EINVAL;
814 }
815
816 ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
817 if (ret) {
818 amdgpu_bo_unreserve(adev->smu.smu_buf);
819 amdgpu_bo_unref(&adev->smu.smu_buf);
820 amdgpu_bo_unref(&adev->smu.toc_buf);
821 DRM_ERROR("Failed to pin the SMU internal buffer\n");
822 return -EINVAL;
823 }
824
825 ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
826 if (ret) {
827 amdgpu_bo_unreserve(adev->smu.smu_buf);
828 amdgpu_bo_unref(&adev->smu.smu_buf);
829 amdgpu_bo_unref(&adev->smu.toc_buf);
830 DRM_ERROR("Failed to map the SMU internal buffer\n");
831 return -EINVAL;
832 }
833
834 amdgpu_bo_unreserve(adev->smu.smu_buf);
835 private->smu_buffer_addr_low = lower_32_bits(mc_addr);
836 private->smu_buffer_addr_high = upper_32_bits(mc_addr);
837
838 adev->smu.smumgr_funcs = &fiji_smumgr_funcs;
839
840 return 0;
841}
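/* Init pins both buffer objects in VRAM and keeps them mapped for the
 * driver's lifetime: the TOC ("header") buffer holds the
 * SMU_DRAMData_TOC filled in by fiji_smu_request_load_fw(), the second
 * buffer is SMU-internal scratch (200 pages). Their MC addresses are
 * what gets handed to the SMU as the DRV/SMU DRAM address pairs.
 */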
842
843int fiji_smu_fini(struct amdgpu_device *adev)
844{
845 amdgpu_bo_unref(&adev->smu.toc_buf);
846 amdgpu_bo_unref(&adev->smu.smu_buf);
847 kfree(adev->smu.priv);
848 adev->smu.priv = NULL;
849 if (adev->firmware.fw_buf)
850 amdgpu_ucode_fini_bo(adev);
851
852 return 0;
853}
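/* Teardown mirrors init: drop the TOC and SMU-internal buffer objects,
 * free the private data, and release the ucode BO if one was created.
 */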
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h b/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h
index 0698764354a2..1cef03deeac3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h
@@ -1,7 +1,5 @@
1/* 1/*
2 * Copyright 2008 Advanced Micro Devices, Inc. 2 * Copyright 2014 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 * 3 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -21,42 +19,24 @@
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
23 * 21 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */ 22 */
28 23
29/* this file defines the CHIP_ and family flags used in the pciids, 24#ifndef FIJI_SMUMGR_H
30 * its is common between kms and non-kms because duplicating it and 25#define FIJI_SMUMGR_H
31 * changing one place is fail.
32 */
33#ifndef AMDGPU_FAMILY_H
34#define AMDGPU_FAMILY_H
35/*
36 * Supported ASIC types
37 */
38enum amdgpu_asic_type {
39 CHIP_BONAIRE = 0,
40 CHIP_KAVERI,
41 CHIP_KABINI,
42 CHIP_HAWAII,
43 CHIP_MULLINS,
44 CHIP_TOPAZ,
45 CHIP_TONGA,
46 CHIP_CARRIZO,
47 CHIP_LAST,
48};
49 26
50/* 27#include "fiji_ppsmc.h"
51 * Chip flags 28
52 */ 29int fiji_smu_init(struct amdgpu_device *adev);
53enum amdgpu_chip_flags { 30int fiji_smu_fini(struct amdgpu_device *adev);
54 AMDGPU_ASIC_MASK = 0x0000ffffUL, 31int fiji_smu_start(struct amdgpu_device *adev);
55 AMDGPU_FLAGS_MASK = 0xffff0000UL, 32
56 AMDGPU_IS_MOBILITY = 0x00010000UL, 33struct fiji_smu_private_data
57 AMDGPU_IS_APU = 0x00020000UL, 34{
58 AMDGPU_IS_PX = 0x00040000UL, 35 uint8_t *header;
59 AMDGPU_EXP_HW_SUPPORT = 0x00080000UL, 36 uint32_t smu_buffer_addr_high;
37 uint32_t smu_buffer_addr_low;
38 uint32_t header_addr_high;
39 uint32_t header_addr_low;
60}; 40};
61 41
62#endif 42#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 0d8bf2cb1956..9b0cab413677 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2173,7 +2173,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
2173 2173
2174 adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; 2174 adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
2175 adev->gfx.config.mem_max_burst_length_bytes = 256; 2175 adev->gfx.config.mem_max_burst_length_bytes = 256;
2176 if (adev->flags & AMDGPU_IS_APU) { 2176 if (adev->flags & AMD_IS_APU) {
2177 /* Get memory bank mapping mode. */ 2177 /* Get memory bank mapping mode. */
2178 tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING); 2178 tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
2179 dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP); 2179 dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
@@ -2648,6 +2648,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
2648{ 2648{
2649 struct amdgpu_device *adev = ring->adev; 2649 struct amdgpu_device *adev = ring->adev;
2650 struct amdgpu_ib ib; 2650 struct amdgpu_ib ib;
2651 struct fence *f = NULL;
2651 uint32_t scratch; 2652 uint32_t scratch;
2652 uint32_t tmp = 0; 2653 uint32_t tmp = 0;
2653 unsigned i; 2654 unsigned i;
@@ -2662,26 +2663,23 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
2662 r = amdgpu_ib_get(ring, NULL, 256, &ib); 2663 r = amdgpu_ib_get(ring, NULL, 256, &ib);
2663 if (r) { 2664 if (r) {
2664 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 2665 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
2665 amdgpu_gfx_scratch_free(adev, scratch); 2666 goto err1;
2666 return r;
2667 } 2667 }
2668 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); 2668 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
2669 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); 2669 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
2670 ib.ptr[2] = 0xDEADBEEF; 2670 ib.ptr[2] = 0xDEADBEEF;
2671 ib.length_dw = 3; 2671 ib.length_dw = 3;
2672 r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); 2672
2673 if (r) { 2673 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
2674 amdgpu_gfx_scratch_free(adev, scratch); 2674 AMDGPU_FENCE_OWNER_UNDEFINED,
2675 amdgpu_ib_free(adev, &ib); 2675 &f);
2676 DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); 2676 if (r)
2677 return r; 2677 goto err2;
2678 } 2678
2679 r = amdgpu_fence_wait(ib.fence, false); 2679 r = fence_wait(f, false);
2680 if (r) { 2680 if (r) {
2681 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 2681 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
2682 amdgpu_gfx_scratch_free(adev, scratch); 2682 goto err2;
2683 amdgpu_ib_free(adev, &ib);
2684 return r;
2685 } 2683 }
2686 for (i = 0; i < adev->usec_timeout; i++) { 2684 for (i = 0; i < adev->usec_timeout; i++) {
2687 tmp = RREG32(scratch); 2685 tmp = RREG32(scratch);
@@ -2691,14 +2689,19 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
2691 } 2689 }
2692 if (i < adev->usec_timeout) { 2690 if (i < adev->usec_timeout) {
2693 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", 2691 DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
2694 ib.fence->ring->idx, i); 2692 ring->idx, i);
2693 goto err2;
2695 } else { 2694 } else {
2696 DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", 2695 DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
2697 scratch, tmp); 2696 scratch, tmp);
2698 r = -EINVAL; 2697 r = -EINVAL;
2699 } 2698 }
2700 amdgpu_gfx_scratch_free(adev, scratch); 2699
2700err2:
2701 fence_put(f);
2701 amdgpu_ib_free(adev, &ib); 2702 amdgpu_ib_free(adev, &ib);
2703err1:
2704 amdgpu_gfx_scratch_free(adev, scratch);
2702 return r; 2705 return r;
2703} 2706}
2704 2707
@@ -3758,7 +3761,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3758 int r; 3761 int r;
3759 3762
3760 /* allocate rlc buffers */ 3763 /* allocate rlc buffers */
3761 if (adev->flags & AMDGPU_IS_APU) { 3764 if (adev->flags & AMD_IS_APU) {
3762 if (adev->asic_type == CHIP_KAVERI) { 3765 if (adev->asic_type == CHIP_KAVERI) {
3763 adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list; 3766 adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
3764 adev->gfx.rlc.reg_list_size = 3767 adev->gfx.rlc.reg_list_size =
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index f5a42ab1f65c..4b68e6306f40 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -87,6 +87,13 @@ MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
87MODULE_FIRMWARE("amdgpu/topaz_mec2.bin"); 87MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
88MODULE_FIRMWARE("amdgpu/topaz_rlc.bin"); 88MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
89 89
90MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
91MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
92MODULE_FIRMWARE("amdgpu/fiji_me.bin");
93MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
94MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
95MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
96
90static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = 97static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
91{ 98{
92 {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0}, 99 {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -217,6 +224,71 @@ static const u32 tonga_mgcg_cgcg_init[] =
217 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, 224 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
218}; 225};
219 226
227static const u32 fiji_golden_common_all[] =
228{
229 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
230 mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
231 mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
232 mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003,
233 mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
234 mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
235 mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
236 mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
237};
238
239static const u32 golden_settings_fiji_a10[] =
240{
241 mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
242 mmDB_DEBUG2, 0xf00fffff, 0x00000400,
243 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
244 mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x00000100,
245 mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
246 mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
247 mmTCC_CTRL, 0x00100000, 0xf30fff7f,
248 mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
249 mmTCP_CHAN_STEER_HI, 0xffffffff, 0x7d6cf5e4,
250 mmTCP_CHAN_STEER_LO, 0xffffffff, 0x3928b1a0,
251};
252
253static const u32 fiji_mgcg_cgcg_init[] =
254{
255 mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffc0,
256 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
257 mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
258 mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
259 mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
260 mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
261 mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
262 mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
263 mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
264 mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
265 mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
266 mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
267 mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
268 mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
269 mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
270 mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
271 mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
272 mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
273 mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
274 mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
275 mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
276 mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
277 mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
278 mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
279 mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
280 mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
281 mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
282 mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
283 mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
284 mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
285 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
286 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
287 mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
288 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
289 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
290};
291
220static const u32 golden_settings_iceland_a11[] = 292static const u32 golden_settings_iceland_a11[] =
221{ 293{
222 mmCB_HW_CONTROL_3, 0x00000040, 0x00000040, 294 mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
@@ -439,6 +511,18 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
439 iceland_golden_common_all, 511 iceland_golden_common_all,
440 (const u32)ARRAY_SIZE(iceland_golden_common_all)); 512 (const u32)ARRAY_SIZE(iceland_golden_common_all));
441 break; 513 break;
514 case CHIP_FIJI:
515 amdgpu_program_register_sequence(adev,
516 fiji_mgcg_cgcg_init,
517 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
518 amdgpu_program_register_sequence(adev,
519 golden_settings_fiji_a10,
520 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
521 amdgpu_program_register_sequence(adev,
522 fiji_golden_common_all,
523 (const u32)ARRAY_SIZE(fiji_golden_common_all));
524 break;
525
442 case CHIP_TONGA: 526 case CHIP_TONGA:
443 amdgpu_program_register_sequence(adev, 527 amdgpu_program_register_sequence(adev,
444 tonga_mgcg_cgcg_init, 528 tonga_mgcg_cgcg_init,
@@ -526,6 +610,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
526{ 610{
527 struct amdgpu_device *adev = ring->adev; 611 struct amdgpu_device *adev = ring->adev;
528 struct amdgpu_ib ib; 612 struct amdgpu_ib ib;
613 struct fence *f = NULL;
529 uint32_t scratch; 614 uint32_t scratch;
530 uint32_t tmp = 0; 615 uint32_t tmp = 0;
531 unsigned i; 616 unsigned i;
@@ -540,26 +625,23 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
540 r = amdgpu_ib_get(ring, NULL, 256, &ib); 625 r = amdgpu_ib_get(ring, NULL, 256, &ib);
541 if (r) { 626 if (r) {
542 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 627 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
543 amdgpu_gfx_scratch_free(adev, scratch); 628 goto err1;
544 return r;
545 } 629 }
546 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); 630 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
547 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); 631 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
548 ib.ptr[2] = 0xDEADBEEF; 632 ib.ptr[2] = 0xDEADBEEF;
549 ib.length_dw = 3; 633 ib.length_dw = 3;
550 r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); 634
551 if (r) { 635 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
552 amdgpu_gfx_scratch_free(adev, scratch); 636 AMDGPU_FENCE_OWNER_UNDEFINED,
553 amdgpu_ib_free(adev, &ib); 637 &f);
554 DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); 638 if (r)
555 return r; 639 goto err2;
556 } 640
557 r = amdgpu_fence_wait(ib.fence, false); 641 r = fence_wait(f, false);
558 if (r) { 642 if (r) {
559 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 643 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
560 amdgpu_gfx_scratch_free(adev, scratch); 644 goto err2;
561 amdgpu_ib_free(adev, &ib);
562 return r;
563 } 645 }
564 for (i = 0; i < adev->usec_timeout; i++) { 646 for (i = 0; i < adev->usec_timeout; i++) {
565 tmp = RREG32(scratch); 647 tmp = RREG32(scratch);
@@ -569,14 +651,18 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
569 } 651 }
570 if (i < adev->usec_timeout) { 652 if (i < adev->usec_timeout) {
571 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", 653 DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
572 ib.fence->ring->idx, i); 654 ring->idx, i);
655 goto err2;
573 } else { 656 } else {
574 DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", 657 DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
575 scratch, tmp); 658 scratch, tmp);
576 r = -EINVAL; 659 r = -EINVAL;
577 } 660 }
578 amdgpu_gfx_scratch_free(adev, scratch); 661err2:
662 fence_put(f);
579 amdgpu_ib_free(adev, &ib); 663 amdgpu_ib_free(adev, &ib);
664err1:
665 amdgpu_gfx_scratch_free(adev, scratch);
580 return r; 666 return r;
581} 667}
582 668
@@ -601,6 +687,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
601 case CHIP_CARRIZO: 687 case CHIP_CARRIZO:
602 chip_name = "carrizo"; 688 chip_name = "carrizo";
603 break; 689 break;
690 case CHIP_FIJI:
691 chip_name = "fiji";
692 break;
604 default: 693 default:
605 BUG(); 694 BUG();
606 } 695 }
@@ -1236,6 +1325,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
1236 adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; 1325 adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
1237 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); 1326 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
1238 } 1327 }
1328 case CHIP_FIJI:
1239 case CHIP_TONGA: 1329 case CHIP_TONGA:
1240 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 1330 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1241 switch (reg_offset) { 1331 switch (reg_offset) {
@@ -1984,6 +2074,23 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
1984 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 2074 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1985 gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN; 2075 gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
1986 break; 2076 break;
2077 case CHIP_FIJI:
2078 adev->gfx.config.max_shader_engines = 4;
2079 adev->gfx.config.max_tile_pipes = 16;
2080 adev->gfx.config.max_cu_per_sh = 16;
2081 adev->gfx.config.max_sh_per_se = 1;
2082 adev->gfx.config.max_backends_per_se = 4;
2083 adev->gfx.config.max_texture_channel_caches = 8;
2084 adev->gfx.config.max_gprs = 256;
2085 adev->gfx.config.max_gs_threads = 32;
2086 adev->gfx.config.max_hw_contexts = 8;
2087
2088 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2089 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2090 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2091 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
2092 gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
2093 break;
1987 case CHIP_TONGA: 2094 case CHIP_TONGA:
1988 adev->gfx.config.max_shader_engines = 4; 2095 adev->gfx.config.max_shader_engines = 4;
1989 adev->gfx.config.max_tile_pipes = 8; 2096 adev->gfx.config.max_tile_pipes = 8;
@@ -2078,7 +2185,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
2078 2185
2079 adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; 2186 adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
2080 adev->gfx.config.mem_max_burst_length_bytes = 256; 2187 adev->gfx.config.mem_max_burst_length_bytes = 256;
2081 if (adev->flags & AMDGPU_IS_APU) { 2188 if (adev->flags & AMD_IS_APU) {
2082 /* Get memory bank mapping mode. */ 2189 /* Get memory bank mapping mode. */
2083 tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING); 2190 tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
2084 dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP); 2191 dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
@@ -2490,6 +2597,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
2490 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); 2597 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
2491 switch (adev->asic_type) { 2598 switch (adev->asic_type) {
2492 case CHIP_TONGA: 2599 case CHIP_TONGA:
2600 case CHIP_FIJI:
2493 amdgpu_ring_write(ring, 0x16000012); 2601 amdgpu_ring_write(ring, 0x16000012);
2494 amdgpu_ring_write(ring, 0x0000002A); 2602 amdgpu_ring_write(ring, 0x0000002A);
2495 break; 2603 break;
@@ -3135,7 +3243,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
3135 WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, 3243 WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
3136 AMDGPU_DOORBELL_KIQ << 2); 3244 AMDGPU_DOORBELL_KIQ << 2);
3137 WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, 3245 WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
3138 0x7FFFF << 2); 3246 AMDGPU_DOORBELL_MEC_RING7 << 2);
3139 } 3247 }
3140 tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); 3248 tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
3141 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3249 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -3875,7 +3983,8 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring,
3875 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; 3983 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
3876 3984
3877 if (ring->adev->asic_type == CHIP_TOPAZ || 3985 if (ring->adev->asic_type == CHIP_TOPAZ ||
3878 ring->adev->asic_type == CHIP_TONGA) 3986 ring->adev->asic_type == CHIP_TONGA ||
3987 ring->adev->asic_type == CHIP_FIJI)
3879 /* there is a hw semaphore bug in VI TONGA; return false to switch back to sw fence wait */ 3988 /* there is a hw semaphore bug in VI TONGA; return false to switch back to sw fence wait */
3880 return false; 3989 return false;
3881 else { 3990 else {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index ae37fce36520..10218828face 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -636,7 +636,7 @@ static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
636 adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; 636 adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
637 637
638 /* base offset of vram pages */ 638 /* base offset of vram pages */
639 if (adev->flags & AMDGPU_IS_APU) { 639 if (adev->flags & AMD_IS_APU) {
640 u64 tmp = RREG32(mmMC_VM_FB_OFFSET); 640 u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
641 tmp <<= 22; 641 tmp <<= 22;
642 adev->vm_manager.vram_base_offset = tmp; 642 adev->vm_manager.vram_base_offset = tmp;
@@ -841,7 +841,7 @@ static int gmc_v7_0_early_init(void *handle)
841 gmc_v7_0_set_gart_funcs(adev); 841 gmc_v7_0_set_gart_funcs(adev);
842 gmc_v7_0_set_irq_funcs(adev); 842 gmc_v7_0_set_irq_funcs(adev);
843 843
844 if (adev->flags & AMDGPU_IS_APU) { 844 if (adev->flags & AMD_IS_APU) {
845 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; 845 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
846 } else { 846 } else {
847 u32 tmp = RREG32(mmMC_SEQ_MISC0); 847 u32 tmp = RREG32(mmMC_SEQ_MISC0);
@@ -957,7 +957,7 @@ static int gmc_v7_0_hw_init(void *handle)
957 957
958 gmc_v7_0_mc_program(adev); 958 gmc_v7_0_mc_program(adev);
959 959
960 if (!(adev->flags & AMDGPU_IS_APU)) { 960 if (!(adev->flags & AMD_IS_APU)) {
961 r = gmc_v7_0_mc_load_microcode(adev); 961 r = gmc_v7_0_mc_load_microcode(adev);
962 if (r) { 962 if (r) {
963 DRM_ERROR("Failed to load MC firmware!\n"); 963 DRM_ERROR("Failed to load MC firmware!\n");
@@ -1172,7 +1172,7 @@ static int gmc_v7_0_soft_reset(void *handle)
1172 1172
1173 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | 1173 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1174 SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) { 1174 SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1175 if (!(adev->flags & AMDGPU_IS_APU)) 1175 if (!(adev->flags & AMD_IS_APU))
1176 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, 1176 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1177 SRBM_SOFT_RESET, SOFT_RESET_MC, 1); 1177 SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1178 } 1178 }
@@ -1282,7 +1282,7 @@ static int gmc_v7_0_set_clockgating_state(void *handle,
1282 if (state == AMD_CG_STATE_GATE) 1282 if (state == AMD_CG_STATE_GATE)
1283 gate = true; 1283 gate = true;
1284 1284
1285 if (!(adev->flags & AMDGPU_IS_APU)) { 1285 if (!(adev->flags & AMD_IS_APU)) {
1286 gmc_v7_0_enable_mc_mgcg(adev, gate); 1286 gmc_v7_0_enable_mc_mgcg(adev, gate);
1287 gmc_v7_0_enable_mc_ls(adev, gate); 1287 gmc_v7_0_enable_mc_ls(adev, gate);
1288 } 1288 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 8135963a66be..78109b750d29 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -44,6 +44,7 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
44 44
45MODULE_FIRMWARE("amdgpu/topaz_mc.bin"); 45MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
46MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); 46MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
47MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
47 48
48static const u32 golden_settings_tonga_a11[] = 49static const u32 golden_settings_tonga_a11[] =
49{ 50{
@@ -61,6 +62,19 @@ static const u32 tonga_mgcg_cgcg_init[] =
61 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 62 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
62}; 63};
63 64
65static const u32 golden_settings_fiji_a10[] =
66{
67 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
68 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
69 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
70 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
71};
72
73static const u32 fiji_mgcg_cgcg_init[] =
74{
75 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
76};
77
64static const u32 golden_settings_iceland_a11[] = 78static const u32 golden_settings_iceland_a11[] =
65{ 79{
66 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, 80 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
@@ -90,6 +104,14 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
90 golden_settings_iceland_a11, 104 golden_settings_iceland_a11,
91 (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); 105 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
92 break; 106 break;
107 case CHIP_FIJI:
108 amdgpu_program_register_sequence(adev,
109 fiji_mgcg_cgcg_init,
110 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
111 amdgpu_program_register_sequence(adev,
112 golden_settings_fiji_a10,
113 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
114 break;
93 case CHIP_TONGA: 115 case CHIP_TONGA:
94 amdgpu_program_register_sequence(adev, 116 amdgpu_program_register_sequence(adev,
95 tonga_mgcg_cgcg_init, 117 tonga_mgcg_cgcg_init,
@@ -202,6 +224,9 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
202 case CHIP_TONGA: 224 case CHIP_TONGA:
203 chip_name = "tonga"; 225 chip_name = "tonga";
204 break; 226 break;
227 case CHIP_FIJI:
228 chip_name = "fiji";
229 break;
205 case CHIP_CARRIZO: 230 case CHIP_CARRIZO:
206 return 0; 231 return 0;
207 default: BUG(); 232 default: BUG();
@@ -737,7 +762,7 @@ static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
737 adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; 762 adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
738 763
739 /* base offset of vram pages */ 764 /* base offset of vram pages */
740 if (adev->flags & AMDGPU_IS_APU) { 765 if (adev->flags & AMD_IS_APU) {
741 u64 tmp = RREG32(mmMC_VM_FB_OFFSET); 766 u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
742 tmp <<= 22; 767 tmp <<= 22;
743 adev->vm_manager.vram_base_offset = tmp; 768 adev->vm_manager.vram_base_offset = tmp;
@@ -816,7 +841,7 @@ static int gmc_v8_0_early_init(void *handle)
816 gmc_v8_0_set_gart_funcs(adev); 841 gmc_v8_0_set_gart_funcs(adev);
817 gmc_v8_0_set_irq_funcs(adev); 842 gmc_v8_0_set_irq_funcs(adev);
818 843
819 if (adev->flags & AMDGPU_IS_APU) { 844 if (adev->flags & AMD_IS_APU) {
820 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; 845 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
821 } else { 846 } else {
822 u32 tmp = RREG32(mmMC_SEQ_MISC0); 847 u32 tmp = RREG32(mmMC_SEQ_MISC0);
@@ -934,7 +959,7 @@ static int gmc_v8_0_hw_init(void *handle)
934 959
935 gmc_v8_0_mc_program(adev); 960 gmc_v8_0_mc_program(adev);
936 961
937 if (!(adev->flags & AMDGPU_IS_APU)) { 962 if (!(adev->flags & AMD_IS_APU)) {
938 r = gmc_v8_0_mc_load_microcode(adev); 963 r = gmc_v8_0_mc_load_microcode(adev);
939 if (r) { 964 if (r) {
940 DRM_ERROR("Failed to load MC firmware!\n"); 965 DRM_ERROR("Failed to load MC firmware!\n");
@@ -1147,7 +1172,7 @@ static int gmc_v8_0_soft_reset(void *handle)
1147 1172
1148 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | 1173 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1149 SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) { 1174 SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1150 if (!(adev->flags & AMDGPU_IS_APU)) 1175 if (!(adev->flags & AMD_IS_APU))
1151 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, 1176 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1152 SRBM_SOFT_RESET, SOFT_RESET_MC, 1); 1177 SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1153 } 1178 }
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index a988dfb1d394..9de8104eddeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -673,6 +673,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
673{ 673{
674 struct amdgpu_device *adev = ring->adev; 674 struct amdgpu_device *adev = ring->adev;
675 struct amdgpu_ib ib; 675 struct amdgpu_ib ib;
676 struct fence *f = NULL;
676 unsigned i; 677 unsigned i;
677 unsigned index; 678 unsigned index;
678 int r; 679 int r;
@@ -688,12 +689,10 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
688 gpu_addr = adev->wb.gpu_addr + (index * 4); 689 gpu_addr = adev->wb.gpu_addr + (index * 4);
689 tmp = 0xCAFEDEAD; 690 tmp = 0xCAFEDEAD;
690 adev->wb.wb[index] = cpu_to_le32(tmp); 691 adev->wb.wb[index] = cpu_to_le32(tmp);
691
692 r = amdgpu_ib_get(ring, NULL, 256, &ib); 692 r = amdgpu_ib_get(ring, NULL, 256, &ib);
693 if (r) { 693 if (r) {
694 amdgpu_wb_free(adev, index);
695 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 694 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
696 return r; 695 goto err0;
697 } 696 }
698 697
699 ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | 698 ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -707,19 +706,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
707 ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); 706 ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
708 ib.length_dw = 8; 707 ib.length_dw = 8;
709 708
710 r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); 709 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
711 if (r) { 710 AMDGPU_FENCE_OWNER_UNDEFINED,
712 amdgpu_ib_free(adev, &ib); 711 &f);
713 amdgpu_wb_free(adev, index); 712 if (r)
714 DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); 713 goto err1;
715 return r; 714
716 } 715 r = fence_wait(f, false);
717 r = amdgpu_fence_wait(ib.fence, false);
718 if (r) { 716 if (r) {
719 amdgpu_ib_free(adev, &ib);
720 amdgpu_wb_free(adev, index);
721 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 717 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
722 return r; 718 goto err1;
723 } 719 }
724 for (i = 0; i < adev->usec_timeout; i++) { 720 for (i = 0; i < adev->usec_timeout; i++) {
725 tmp = le32_to_cpu(adev->wb.wb[index]); 721 tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -729,12 +725,17 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
729 } 725 }
730 if (i < adev->usec_timeout) { 726 if (i < adev->usec_timeout) {
731 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", 727 DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
732 ib.fence->ring->idx, i); 728 ring->idx, i);
729 goto err1;
733 } else { 730 } else {
734 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); 731 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
735 r = -EINVAL; 732 r = -EINVAL;
736 } 733 }
734
735err1:
736 fence_put(f);
737 amdgpu_ib_free(adev, &ib); 737 amdgpu_ib_free(adev, &ib);
738err0:
738 amdgpu_wb_free(adev, index); 739 amdgpu_wb_free(adev, index);
739 return r; 740 return r;
740} 741}
@@ -1415,5 +1416,6 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
1415 if (adev->vm_manager.vm_pte_funcs == NULL) { 1416 if (adev->vm_manager.vm_pte_funcs == NULL) {
1416 adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; 1417 adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
1417 adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; 1418 adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
1419 adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
1418 } 1420 }
1419} 1421}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2b86569b18d3..029f3455f9f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -53,6 +53,8 @@ MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
53MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin"); 53MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
54MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin"); 54MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
55MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin"); 55MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
56MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
57MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
56 58
57static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = 59static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
58{ 60{
@@ -80,6 +82,24 @@ static const u32 tonga_mgcg_cgcg_init[] =
80 mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100 82 mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
81}; 83};
82 84
85static const u32 golden_settings_fiji_a10[] =
86{
87 mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
88 mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
89 mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
90 mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
91 mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
92 mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
93 mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
94 mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
95};
96
97static const u32 fiji_mgcg_cgcg_init[] =
98{
99 mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
100 mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
101};
102
83static const u32 cz_golden_settings_a11[] = 103static const u32 cz_golden_settings_a11[] =
84{ 104{
85 mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, 105 mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
@@ -122,6 +142,14 @@ static const u32 cz_mgcg_cgcg_init[] =
122static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) 142static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
123{ 143{
124 switch (adev->asic_type) { 144 switch (adev->asic_type) {
145 case CHIP_FIJI:
146 amdgpu_program_register_sequence(adev,
147 fiji_mgcg_cgcg_init,
148 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
149 amdgpu_program_register_sequence(adev,
150 golden_settings_fiji_a10,
151 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
152 break;
125 case CHIP_TONGA: 153 case CHIP_TONGA:
126 amdgpu_program_register_sequence(adev, 154 amdgpu_program_register_sequence(adev,
127 tonga_mgcg_cgcg_init, 155 tonga_mgcg_cgcg_init,
@@ -167,6 +195,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
167 case CHIP_TONGA: 195 case CHIP_TONGA:
168 chip_name = "tonga"; 196 chip_name = "tonga";
169 break; 197 break;
198 case CHIP_FIJI:
199 chip_name = "fiji";
200 break;
170 case CHIP_CARRIZO: 201 case CHIP_CARRIZO:
171 chip_name = "carrizo"; 202 chip_name = "carrizo";
172 break; 203 break;
@@ -763,6 +794,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
763{ 794{
764 struct amdgpu_device *adev = ring->adev; 795 struct amdgpu_device *adev = ring->adev;
765 struct amdgpu_ib ib; 796 struct amdgpu_ib ib;
797 struct fence *f = NULL;
766 unsigned i; 798 unsigned i;
767 unsigned index; 799 unsigned index;
768 int r; 800 int r;
@@ -778,12 +810,10 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
778 gpu_addr = adev->wb.gpu_addr + (index * 4); 810 gpu_addr = adev->wb.gpu_addr + (index * 4);
779 tmp = 0xCAFEDEAD; 811 tmp = 0xCAFEDEAD;
780 adev->wb.wb[index] = cpu_to_le32(tmp); 812 adev->wb.wb[index] = cpu_to_le32(tmp);
781
782 r = amdgpu_ib_get(ring, NULL, 256, &ib); 813 r = amdgpu_ib_get(ring, NULL, 256, &ib);
783 if (r) { 814 if (r) {
784 amdgpu_wb_free(adev, index);
785 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 815 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
786 return r; 816 goto err0;
787 } 817 }
788 818
789 ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | 819 ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -797,19 +827,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
797 ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); 827 ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
798 ib.length_dw = 8; 828 ib.length_dw = 8;
799 829
800 r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); 830 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
801 if (r) { 831 AMDGPU_FENCE_OWNER_UNDEFINED,
802 amdgpu_ib_free(adev, &ib); 832 &f);
803 amdgpu_wb_free(adev, index); 833 if (r)
804 DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); 834 goto err1;
805 return r; 835
806 } 836 r = fence_wait(f, false);
807 r = amdgpu_fence_wait(ib.fence, false);
808 if (r) { 837 if (r) {
809 amdgpu_ib_free(adev, &ib);
810 amdgpu_wb_free(adev, index);
811 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 838 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
812 return r; 839 goto err1;
813 } 840 }
814 for (i = 0; i < adev->usec_timeout; i++) { 841 for (i = 0; i < adev->usec_timeout; i++) {
815 tmp = le32_to_cpu(adev->wb.wb[index]); 842 tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -819,12 +846,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
819 } 846 }
820 if (i < adev->usec_timeout) { 847 if (i < adev->usec_timeout) {
821 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", 848 DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
822 ib.fence->ring->idx, i); 849 ring->idx, i);
850 goto err1;
823 } else { 851 } else {
824 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); 852 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
825 r = -EINVAL; 853 r = -EINVAL;
826 } 854 }
855err1:
856 fence_put(f);
827 amdgpu_ib_free(adev, &ib); 857 amdgpu_ib_free(adev, &ib);
858err0:
828 amdgpu_wb_free(adev, index); 859 amdgpu_wb_free(adev, index);
829 return r; 860 return r;
830} 861}
@@ -1509,5 +1540,6 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
1509 if (adev->vm_manager.vm_pte_funcs == NULL) { 1540 if (adev->vm_manager.vm_pte_funcs == NULL) {
1510 adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; 1541 adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
1511 adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; 1542 adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
1543 adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
1512 } 1544 }
1513} 1545}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 4efd671d7a9b..9ac383bc6c1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -534,7 +534,7 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
534static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring) 534static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
535{ 535{
536 struct amdgpu_device *adev = ring->adev; 536 struct amdgpu_device *adev = ring->adev;
537 struct amdgpu_fence *fence = NULL; 537 struct fence *fence = NULL;
538 int r; 538 int r;
539 539
540 r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); 540 r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
@@ -555,14 +555,14 @@ static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
555 goto error; 555 goto error;
556 } 556 }
557 557
558 r = amdgpu_fence_wait(fence, false); 558 r = fence_wait(fence, false);
559 if (r) { 559 if (r) {
560 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 560 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
561 goto error; 561 goto error;
562 } 562 }
563 DRM_INFO("ib test on ring %d succeeded\n", ring->idx); 563 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
564error: 564error:
565 amdgpu_fence_unref(&fence); 565 fence_put(fence);
566 amdgpu_asic_set_uvd_clocks(adev, 0, 0); 566 amdgpu_asic_set_uvd_clocks(adev, 0, 0);
567 return r; 567 return r;
568} 568}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index b756bd99c0fd..de4b3f57902d 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -580,7 +580,7 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
580static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring) 580static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
581{ 581{
582 struct amdgpu_device *adev = ring->adev; 582 struct amdgpu_device *adev = ring->adev;
583 struct amdgpu_fence *fence = NULL; 583 struct fence *fence = NULL;
584 int r; 584 int r;
585 585
586 r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); 586 r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
@@ -601,14 +601,14 @@ static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
601 goto error; 601 goto error;
602 } 602 }
603 603
604 r = amdgpu_fence_wait(fence, false); 604 r = fence_wait(fence, false);
605 if (r) { 605 if (r) {
606 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 606 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
607 goto error; 607 goto error;
608 } 608 }
609 DRM_INFO("ib test on ring %d succeeded\n", ring->idx); 609 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
610error: 610error:
611 amdgpu_fence_unref(&fence); 611 fence_put(fence);
612 amdgpu_asic_set_uvd_clocks(adev, 0, 0); 612 amdgpu_asic_set_uvd_clocks(adev, 0, 0);
613 return r; 613 return r;
614} 614}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 49aa931b2cb4..66c975870e97 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -575,7 +575,7 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
575 */ 575 */
576static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring) 576static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
577{ 577{
578 struct amdgpu_fence *fence = NULL; 578 struct fence *fence = NULL;
579 int r; 579 int r;
580 580
581 r = amdgpu_uvd_get_create_msg(ring, 1, NULL); 581 r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -590,14 +590,14 @@ static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
590 goto error; 590 goto error;
591 } 591 }
592 592
593 r = amdgpu_fence_wait(fence, false); 593 r = fence_wait(fence, false);
594 if (r) { 594 if (r) {
595 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 595 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
596 goto error; 596 goto error;
597 } 597 }
598 DRM_INFO("ib test on ring %d succeeded\n", ring->idx); 598 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
599error: 599error:
600 amdgpu_fence_unref(&fence); 600 fence_put(fence);
601 return r; 601 return r;
602} 602}
603 603
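The three UVD hunks above (v4.2, v5.0, v6.0) are the same conversion applied once per generation: the driver-private amdgpu_fence_wait()/amdgpu_fence_unref() pair gives way to the kernel's generic struct fence interface. A minimal sketch of the resulting pattern, assuming the linux/fence.h API of this era (later renamed dma_fence); wait_and_release() is an illustrative helper, not from the patch:

    #include <linux/fence.h>

    /* Wait for a fence to signal, then drop our reference. */
    static int wait_and_release(struct fence *fence)
    {
            /* false = uninterruptible wait, matching the hunks above */
            int r = fence_wait(fence, false);

            /* fence_put() is NULL-safe and releases the reference
             * whether or not the wait succeeded, which is why the
             * error label above can share it. */
            fence_put(fence);
            return r;
    }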
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index d1064ca3670e..4349658081ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -205,7 +205,14 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
205 u32 tmp; 205 u32 tmp;
206 unsigned ret; 206 unsigned ret;
207 207
208 if (adev->flags & AMDGPU_IS_APU) 208 /* Fiji is single pipe */
209 if (adev->asic_type == CHIP_FIJI) {
210 ret = AMDGPU_VCE_HARVEST_VCE1;
211 return ret;
212 }
213
214 /* Tonga and CZ are dual or single pipe */
215 if (adev->flags & AMD_IS_APU)
209 tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) & 216 tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
210 VCE_HARVEST_FUSE_MACRO__MASK) >> 217 VCE_HARVEST_FUSE_MACRO__MASK) >>
211 VCE_HARVEST_FUSE_MACRO__SHIFT; 218 VCE_HARVEST_FUSE_MACRO__SHIFT;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 68552da40287..552d9e75ad1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -203,6 +203,17 @@ static const u32 tonga_mgcg_cgcg_init[] =
203 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, 203 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
204}; 204};
205 205
206static const u32 fiji_mgcg_cgcg_init[] =
207{
208 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
209 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
210 mmPCIE_DATA, 0x000f0000, 0x00000000,
211 mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
212 mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
213 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
214 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
215};
216
206static const u32 iceland_mgcg_cgcg_init[] = 217static const u32 iceland_mgcg_cgcg_init[] =
207{ 218{
208 mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2, 219 mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
@@ -232,6 +243,11 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
232 iceland_mgcg_cgcg_init, 243 iceland_mgcg_cgcg_init,
233 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); 244 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
234 break; 245 break;
246 case CHIP_FIJI:
247 amdgpu_program_register_sequence(adev,
248 fiji_mgcg_cgcg_init,
249 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
250 break;
235 case CHIP_TONGA: 251 case CHIP_TONGA:
236 amdgpu_program_register_sequence(adev, 252 amdgpu_program_register_sequence(adev,
237 tonga_mgcg_cgcg_init, 253 tonga_mgcg_cgcg_init,
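The new fiji_mgcg_cgcg_init table above follows the usual golden-register layout: flat triplets of (offset, and_mask, or_mask) consumed three at a time by amdgpu_program_register_sequence(). A sketch of that walker, written from memory of the era's amdgpu_device.c, so treat the details as an approximation rather than the actual implementation:

    /* Apply (reg, and_mask, or_mask) triplets; an and_mask of
     * 0xffffffff means "write or_mask verbatim", otherwise do a
     * read-modify-write that clears the and_mask bits first. */
    static void program_register_triplets(struct amdgpu_device *adev,
                                          const u32 *registers,
                                          u32 array_size)
    {
            u32 i, reg, and_mask, or_mask, tmp;

            for (i = 0; i + 2 < array_size; i += 3) {
                    reg      = registers[i + 0];
                    and_mask = registers[i + 1];
                    or_mask  = registers[i + 2];

                    if (and_mask == 0xffffffff) {
                            tmp = or_mask;
                    } else {
                            tmp = RREG32(reg);
                            tmp &= ~and_mask;
                            tmp |= or_mask;
                    }
                    WREG32(reg, tmp);
            }
    }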
@@ -261,7 +277,7 @@ static u32 vi_get_xclk(struct amdgpu_device *adev)
261 u32 reference_clock = adev->clock.spll.reference_freq; 277 u32 reference_clock = adev->clock.spll.reference_freq;
262 u32 tmp; 278 u32 tmp;
263 279
264 if (adev->flags & AMDGPU_IS_APU) 280 if (adev->flags & AMD_IS_APU)
265 return reference_clock; 281 return reference_clock;
266 282
267 tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2); 283 tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
@@ -362,6 +378,26 @@ static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
362 378
363static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = { 379static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
364 {mmGRBM_STATUS, false}, 380 {mmGRBM_STATUS, false},
381 {mmGRBM_STATUS2, false},
382 {mmGRBM_STATUS_SE0, false},
383 {mmGRBM_STATUS_SE1, false},
384 {mmGRBM_STATUS_SE2, false},
385 {mmGRBM_STATUS_SE3, false},
386 {mmSRBM_STATUS, false},
387 {mmSRBM_STATUS2, false},
388 {mmSRBM_STATUS3, false},
389 {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
390 {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
391 {mmCP_STAT, false},
392 {mmCP_STALLED_STAT1, false},
393 {mmCP_STALLED_STAT2, false},
394 {mmCP_STALLED_STAT3, false},
395 {mmCP_CPF_BUSY_STAT, false},
396 {mmCP_CPF_STALLED_STAT1, false},
397 {mmCP_CPF_STATUS, false},
398 {mmCP_CPC_BUSY_STAT, false},
399 {mmCP_CPC_STALLED_STAT1, false},
400 {mmCP_CPC_STATUS, false},
365 {mmGB_ADDR_CONFIG, false}, 401 {mmGB_ADDR_CONFIG, false},
366 {mmMC_ARB_RAMCFG, false}, 402 {mmMC_ARB_RAMCFG, false},
367 {mmGB_TILE_MODE0, false}, 403 {mmGB_TILE_MODE0, false},
@@ -449,6 +485,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
449 asic_register_table = tonga_allowed_read_registers; 485 asic_register_table = tonga_allowed_read_registers;
450 size = ARRAY_SIZE(tonga_allowed_read_registers); 486 size = ARRAY_SIZE(tonga_allowed_read_registers);
451 break; 487 break;
488 case CHIP_FIJI:
452 case CHIP_TONGA: 489 case CHIP_TONGA:
453 case CHIP_CARRIZO: 490 case CHIP_CARRIZO:
454 asic_register_table = cz_allowed_read_registers; 491 asic_register_table = cz_allowed_read_registers;
@@ -751,7 +788,7 @@ static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
751 srbm_soft_reset = 788 srbm_soft_reset =
752 REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); 789 REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
753 790
754 if (!(adev->flags & AMDGPU_IS_APU)) { 791 if (!(adev->flags & AMD_IS_APU)) {
755 if (reset_mask & AMDGPU_RESET_MC) 792 if (reset_mask & AMDGPU_RESET_MC)
756 srbm_soft_reset = 793 srbm_soft_reset =
757 REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); 794 REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
@@ -971,7 +1008,7 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
971 if (amdgpu_pcie_gen2 == 0) 1008 if (amdgpu_pcie_gen2 == 0)
972 return; 1009 return;
973 1010
974 if (adev->flags & AMDGPU_IS_APU) 1011 if (adev->flags & AMD_IS_APU)
975 return; 1012 return;
976 1013
977 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 1014 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
@@ -999,7 +1036,7 @@ static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
999 u32 tmp; 1036 u32 tmp;
1000 1037
1001 /* not necessary on CZ */ 1038 /* not necessary on CZ */
1002 if (adev->flags & AMDGPU_IS_APU) 1039 if (adev->flags & AMD_IS_APU)
1003 return; 1040 return;
1004 1041
1005 tmp = RREG32(mmBIF_DOORBELL_APER_EN); 1042 tmp = RREG32(mmBIF_DOORBELL_APER_EN);
@@ -1127,6 +1164,74 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
1127 }, 1164 },
1128}; 1165};
1129 1166
1167static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
1168{
1169 /* ORDER MATTERS! */
1170 {
1171 .type = AMD_IP_BLOCK_TYPE_COMMON,
1172 .major = 2,
1173 .minor = 0,
1174 .rev = 0,
1175 .funcs = &vi_common_ip_funcs,
1176 },
1177 {
1178 .type = AMD_IP_BLOCK_TYPE_GMC,
1179 .major = 8,
1180 .minor = 5,
1181 .rev = 0,
1182 .funcs = &gmc_v8_0_ip_funcs,
1183 },
1184 {
1185 .type = AMD_IP_BLOCK_TYPE_IH,
1186 .major = 3,
1187 .minor = 0,
1188 .rev = 0,
1189 .funcs = &tonga_ih_ip_funcs,
1190 },
1191 {
1192 .type = AMD_IP_BLOCK_TYPE_SMC,
1193 .major = 7,
1194 .minor = 1,
1195 .rev = 0,
1196 .funcs = &fiji_dpm_ip_funcs,
1197 },
1198 {
1199 .type = AMD_IP_BLOCK_TYPE_DCE,
1200 .major = 10,
1201 .minor = 1,
1202 .rev = 0,
1203 .funcs = &dce_v10_0_ip_funcs,
1204 },
1205 {
1206 .type = AMD_IP_BLOCK_TYPE_GFX,
1207 .major = 8,
1208 .minor = 0,
1209 .rev = 0,
1210 .funcs = &gfx_v8_0_ip_funcs,
1211 },
1212 {
1213 .type = AMD_IP_BLOCK_TYPE_SDMA,
1214 .major = 3,
1215 .minor = 0,
1216 .rev = 0,
1217 .funcs = &sdma_v3_0_ip_funcs,
1218 },
1219 {
1220 .type = AMD_IP_BLOCK_TYPE_UVD,
1221 .major = 6,
1222 .minor = 0,
1223 .rev = 0,
1224 .funcs = &uvd_v6_0_ip_funcs,
1225 },
1226 {
1227 .type = AMD_IP_BLOCK_TYPE_VCE,
1228 .major = 3,
1229 .minor = 0,
1230 .rev = 0,
1231 .funcs = &vce_v3_0_ip_funcs,
1232 },
1233};
1234
1130static const struct amdgpu_ip_block_version cz_ip_blocks[] = 1235static const struct amdgpu_ip_block_version cz_ip_blocks[] =
1131{ 1236{
1132 /* ORDER MATTERS! */ 1237 /* ORDER MATTERS! */
@@ -1202,6 +1307,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1202 adev->ip_blocks = topaz_ip_blocks; 1307 adev->ip_blocks = topaz_ip_blocks;
1203 adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); 1308 adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
1204 break; 1309 break;
1310 case CHIP_FIJI:
1311 adev->ip_blocks = fiji_ip_blocks;
1312 adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
1313 break;
1205 case CHIP_TONGA: 1314 case CHIP_TONGA:
1206 adev->ip_blocks = tonga_ip_blocks; 1315 adev->ip_blocks = tonga_ip_blocks;
1207 adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); 1316 adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
@@ -1248,7 +1357,7 @@ static int vi_common_early_init(void *handle)
1248 bool smc_enabled = false; 1357 bool smc_enabled = false;
1249 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1358 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1250 1359
1251 if (adev->flags & AMDGPU_IS_APU) { 1360 if (adev->flags & AMD_IS_APU) {
1252 adev->smc_rreg = &cz_smc_rreg; 1361 adev->smc_rreg = &cz_smc_rreg;
1253 adev->smc_wreg = &cz_smc_wreg; 1362 adev->smc_wreg = &cz_smc_wreg;
1254 } else { 1363 } else {
@@ -1279,6 +1388,7 @@ static int vi_common_early_init(void *handle)
1279 if (amdgpu_smc_load_fw && smc_enabled) 1388 if (amdgpu_smc_load_fw && smc_enabled)
1280 adev->firmware.smu_load = true; 1389 adev->firmware.smu_load = true;
1281 break; 1390 break;
1391 case CHIP_FIJI:
1282 case CHIP_TONGA: 1392 case CHIP_TONGA:
1283 adev->has_uvd = true; 1393 adev->has_uvd = true;
1284 adev->cg_flags = 0; 1394 adev->cg_flags = 0;
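The fiji_ip_blocks table and the new CHIP_FIJI cases above plug Fiji into the same phased bring-up every VI part uses: the device core walks adev->ip_blocks front to back at each init stage, which is why the table is ordered (GMC before the engines that allocate GPU memory). A sketch of one such pass, assuming it mirrors the early-init loop in amdgpu_device.c of this era:

    /* Run one init phase across every IP block, in table order. */
    static int early_init_ip_blocks(struct amdgpu_device *adev)
    {
            int i, r;

            for (i = 0; i < adev->num_ip_blocks; i++) {
                    /* amd_ip_funcs callbacks take an opaque handle,
                     * cf. vi_common_early_init(void *handle) above. */
                    r = adev->ip_blocks[i].funcs->early_init((void *)adev);
                    if (r)
                            return r;
            }
            return 0;
    }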
diff --git a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
index 3b45332f5df4..fc120ba18aad 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
@@ -30,7 +30,7 @@ int cz_smu_start(struct amdgpu_device *adev);
30int cz_smu_fini(struct amdgpu_device *adev); 30int cz_smu_fini(struct amdgpu_device *adev);
31 31
32extern const struct amd_ip_funcs tonga_dpm_ip_funcs; 32extern const struct amd_ip_funcs tonga_dpm_ip_funcs;
33 33extern const struct amd_ip_funcs fiji_dpm_ip_funcs;
34extern const struct amd_ip_funcs iceland_dpm_ip_funcs; 34extern const struct amd_ip_funcs iceland_dpm_ip_funcs;
35 35
36#endif 36#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 23ce774ff09d..c6f435aa803f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -143,7 +143,7 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
143 get_sh_mem_bases_32(qpd_to_pdd(qpd)); 143 get_sh_mem_bases_32(qpd_to_pdd(qpd));
144 else 144 else
145 value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) << 145 value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
146 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) && 146 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
147 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK; 147 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
148 148
149 q->properties.sdma_vm_addr = value; 149 q->properties.sdma_vm_addr = value;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index 44c38e8e54d3..7e9cae9d349b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -155,7 +155,7 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
155 get_sh_mem_bases_32(qpd_to_pdd(qpd)); 155 get_sh_mem_bases_32(qpd_to_pdd(qpd));
156 else 156 else
157 value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) << 157 value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
158 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) && 158 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
159 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK; 159 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
160 160
161 q->properties.sdma_vm_addr = value; 161 q->properties.sdma_vm_addr = value;
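The two one-character kfd fixes above (CIK and VI variants of init_sdma_vm) correct a classic logical-vs-bitwise slip: '&&' collapses the shifted shared-base nybble to 0 or 1 before the mask can select any bits, so the field was effectively lost. A standalone illustration with hypothetical shift and mask values standing in for the real SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE definitions:

    #include <stdio.h>
    #include <stdint.h>

    #define SHARED_BASE_SHIFT 16         /* hypothetical stand-in */
    #define SHARED_BASE_MASK  0x00ff0000 /* hypothetical stand-in */

    int main(void)
    {
            uint32_t nybble = 0x5a;

            /* '&&' yields a boolean: any nonzero operand pair becomes 1 */
            uint32_t buggy = (nybble << SHARED_BASE_SHIFT) && SHARED_BASE_MASK;
            /* '&' keeps the shifted bits that fall inside the mask */
            uint32_t fixed = (nybble << SHARED_BASE_SHIFT) & SHARED_BASE_MASK;

            printf("buggy=0x%08x fixed=0x%08x\n", buggy, fixed);
            return 0;   /* prints buggy=0x00000001 fixed=0x005a0000 */
    }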
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 5bdf1b4397a0..68a8eaa1b7d0 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -23,6 +23,45 @@
23#ifndef __AMD_SHARED_H__ 23#ifndef __AMD_SHARED_H__
24#define __AMD_SHARED_H__ 24#define __AMD_SHARED_H__
25 25
26#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */
27
28/*
29* Supported GPU families (aligned with amdgpu_drm.h)
30*/
31#define AMD_FAMILY_UNKNOWN 0
32#define AMD_FAMILY_CI 120 /* Bonaire, Hawaii */
33#define AMD_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
34#define AMD_FAMILY_VI 130 /* Iceland, Tonga */
35#define AMD_FAMILY_CZ 135 /* Carrizo */
36
37/*
38 * Supported ASIC types
39 */
40enum amd_asic_type {
41 CHIP_BONAIRE = 0,
42 CHIP_KAVERI,
43 CHIP_KABINI,
44 CHIP_HAWAII,
45 CHIP_MULLINS,
46 CHIP_TOPAZ,
47 CHIP_TONGA,
48 CHIP_FIJI,
49 CHIP_CARRIZO,
50 CHIP_LAST,
51};
52
53/*
54 * Chip flags
55 */
56enum amd_chip_flags {
57 AMD_ASIC_MASK = 0x0000ffffUL,
58 AMD_FLAGS_MASK = 0xffff0000UL,
59 AMD_IS_MOBILITY = 0x00010000UL,
60 AMD_IS_APU = 0x00020000UL,
61 AMD_IS_PX = 0x00040000UL,
62 AMD_EXP_HW_SUPPORT = 0x00080000UL,
63};
64
26enum amd_ip_block_type { 65enum amd_ip_block_type {
27 AMD_IP_BLOCK_TYPE_COMMON, 66 AMD_IP_BLOCK_TYPE_COMMON,
28 AMD_IP_BLOCK_TYPE_GMC, 67 AMD_IP_BLOCK_TYPE_GMC,
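The new shared-flag block above packs two things into one unsigned long: the ASIC type in the low 16 bits (AMD_ASIC_MASK) and boolean feature flags in the high 16 (AMD_FLAGS_MASK), which is what makes the AMD_IS_APU tests sprinkled through this merge cheap single-AND checks. A small sketch of the intended consumption; the helper names are illustrative, not from the patch:

    /* Low 16 bits: enum amd_asic_type value, e.g. CHIP_FIJI */
    static inline enum amd_asic_type flags_to_asic(unsigned long flags)
    {
            return flags & AMD_ASIC_MASK;
    }

    /* High 16 bits: feature flags, e.g. the APU tests above */
    static inline bool flags_is_apu(unsigned long flags)
    {
            return !!(flags & AMD_IS_APU);
    }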
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
new file mode 100644
index 000000000000..44b1855cb8df
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -0,0 +1,1246 @@
1/*
2 * SMU_7_1_3 Register documentation
3 *
4 * Copyright (C) 2014 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
20 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef SMU_7_1_3_D_H
25#define SMU_7_1_3_D_H
26
27#define mmGCK_SMC_IND_INDEX 0x80
28#define mmGCK0_GCK_SMC_IND_INDEX 0x80
29#define mmGCK1_GCK_SMC_IND_INDEX 0x82
30#define mmGCK2_GCK_SMC_IND_INDEX 0x84
31#define mmGCK3_GCK_SMC_IND_INDEX 0x86
32#define mmGCK_SMC_IND_DATA 0x81
33#define mmGCK0_GCK_SMC_IND_DATA 0x81
34#define mmGCK1_GCK_SMC_IND_DATA 0x83
35#define mmGCK2_GCK_SMC_IND_DATA 0x85
36#define mmGCK3_GCK_SMC_IND_DATA 0x87
37#define ixGCK_MCLK_FUSES 0xc0500008
38#define ixCG_DCLK_CNTL 0xc050009c
39#define ixCG_DCLK_STATUS 0xc05000a0
40#define ixCG_VCLK_CNTL 0xc05000a4
41#define ixCG_VCLK_STATUS 0xc05000a8
42#define ixCG_ECLK_CNTL 0xc05000ac
43#define ixCG_ECLK_STATUS 0xc05000b0
44#define ixCG_ACLK_CNTL 0xc05000dc
45#define ixCG_MCLK_CNTL 0xc0500120
46#define ixCG_MCLK_STATUS 0xc0500124
47#define ixGCK_DFS_BYPASS_CNTL 0xc0500118
48#define ixCG_SPLL_FUNC_CNTL 0xc0500140
49#define ixCG_SPLL_FUNC_CNTL_2 0xc0500144
50#define ixCG_SPLL_FUNC_CNTL_3 0xc0500148
51#define ixCG_SPLL_FUNC_CNTL_4 0xc050014c
52#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
53#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
54#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
55#define ixSPLL_CNTL_MODE 0xc0500160
56#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
57#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
58#define ixMPLL_BYPASSCLK_SEL 0xc050019c
59#define ixCG_CLKPIN_CNTL 0xc05001a0
60#define ixCG_CLKPIN_CNTL_2 0xc05001a4
61#define ixCG_CLKPIN_CNTL_DC 0xc0500204
62#define ixTHM_CLK_CNTL 0xc05001a8
63#define ixMISC_CLK_CTRL 0xc05001ac
64#define ixGCK_PLL_TEST_CNTL 0xc05001c0
65#define ixGCK_PLL_TEST_CNTL_2 0xc05001c4
66#define ixGCK_ADFS_CLK_BYPASS_CNTL1 0xc05001c8
67#define mmSMC_IND_INDEX 0x80
68#define mmSMC0_SMC_IND_INDEX 0x80
69#define mmSMC1_SMC_IND_INDEX 0x82
70#define mmSMC2_SMC_IND_INDEX 0x84
71#define mmSMC3_SMC_IND_INDEX 0x86
72#define mmSMC_IND_DATA 0x81
73#define mmSMC0_SMC_IND_DATA 0x81
74#define mmSMC1_SMC_IND_DATA 0x83
75#define mmSMC2_SMC_IND_DATA 0x85
76#define mmSMC3_SMC_IND_DATA 0x87
77#define mmSMC_IND_INDEX_0 0x80
78#define mmSMC_IND_DATA_0 0x81
79#define mmSMC_IND_INDEX_1 0x82
80#define mmSMC_IND_DATA_1 0x83
81#define mmSMC_IND_INDEX_2 0x84
82#define mmSMC_IND_DATA_2 0x85
83#define mmSMC_IND_INDEX_3 0x86
84#define mmSMC_IND_DATA_3 0x87
85#define mmSMC_IND_INDEX_4 0x88
86#define mmSMC_IND_DATA_4 0x89
87#define mmSMC_IND_INDEX_5 0x8a
88#define mmSMC_IND_DATA_5 0x8b
89#define mmSMC_IND_INDEX_6 0x8c
90#define mmSMC_IND_DATA_6 0x8d
91#define mmSMC_IND_INDEX_7 0x8e
92#define mmSMC_IND_DATA_7 0x8f
93#define mmSMC_IND_ACCESS_CNTL 0x92
94#define mmSMC_MESSAGE_0 0x94
95#define mmSMC_RESP_0 0x95
96#define mmSMC_MESSAGE_1 0x96
97#define mmSMC_RESP_1 0x97
98#define mmSMC_MESSAGE_2 0x98
99#define mmSMC_RESP_2 0x99
100#define mmSMC_MESSAGE_3 0x9a
101#define mmSMC_RESP_3 0x9b
102#define mmSMC_MESSAGE_4 0x9c
103#define mmSMC_RESP_4 0x9d
104#define mmSMC_MESSAGE_5 0x9e
105#define mmSMC_RESP_5 0x9f
106#define mmSMC_MESSAGE_6 0xa0
107#define mmSMC_RESP_6 0xa1
108#define mmSMC_MESSAGE_7 0xa2
109#define mmSMC_RESP_7 0xa3
110#define mmSMC_MSG_ARG_0 0xa4
111#define mmSMC_MSG_ARG_1 0xa5
112#define mmSMC_MSG_ARG_2 0xa6
113#define mmSMC_MSG_ARG_3 0xa7
114#define mmSMC_MSG_ARG_4 0xa8
115#define mmSMC_MSG_ARG_5 0xa9
116#define mmSMC_MSG_ARG_6 0xaa
117#define mmSMC_MSG_ARG_7 0xab
118#define mmSMC_MESSAGE_8 0xb5
119#define mmSMC_RESP_8 0xb6
120#define mmSMC_MESSAGE_9 0xb7
121#define mmSMC_RESP_9 0xb8
122#define mmSMC_MESSAGE_10 0xb9
123#define mmSMC_RESP_10 0xba
124#define mmSMC_MESSAGE_11 0xbb
125#define mmSMC_RESP_11 0xbc
126#define mmSMC_MSG_ARG_8 0xbd
127#define mmSMC_MSG_ARG_9 0xbe
128#define mmSMC_MSG_ARG_10 0xbf
129#define mmSMC_MSG_ARG_11 0x93
130#define ixSMC_SYSCON_RESET_CNTL 0x80000000
131#define ixSMC_SYSCON_CLOCK_CNTL_0 0x80000004
132#define ixSMC_SYSCON_CLOCK_CNTL_1 0x80000008
133#define ixSMC_SYSCON_CLOCK_CNTL_2 0x8000000c
134#define ixSMC_SYSCON_MISC_CNTL 0x80000010
135#define ixSMC_SYSCON_MSG_ARG_0 0x80000068
136#define ixSMC_PC_C 0x80000370
137#define ixSMC_SCRATCH9 0x80000424
138#define mmGPIOPAD_SW_INT_STAT 0x180
139#define mmGPIOPAD_STRENGTH 0x181
140#define mmGPIOPAD_MASK 0x182
141#define mmGPIOPAD_A 0x183
142#define mmGPIOPAD_EN 0x184
143#define mmGPIOPAD_Y 0x185
144#define mmGPIOPAD_PINSTRAPS 0x186
145#define mmGPIOPAD_INT_STAT_EN 0x187
146#define mmGPIOPAD_INT_STAT 0x188
147#define mmGPIOPAD_INT_STAT_AK 0x189
148#define mmGPIOPAD_INT_EN 0x18a
149#define mmGPIOPAD_INT_TYPE 0x18b
150#define mmGPIOPAD_INT_POLARITY 0x18c
151#define mmGPIOPAD_EXTERN_TRIG_CNTL 0x18d
152#define mmGPIOPAD_RCVR_SEL 0x191
153#define mmGPIOPAD_PU_EN 0x192
154#define mmGPIOPAD_PD_EN 0x193
155#define mmCG_FPS_CNT 0x1b6
156#define mmSMU_IND_INDEX_0 0x1a6
157#define mmSMU_IND_DATA_0 0x1a7
158#define mmSMU_IND_INDEX_1 0x1a8
159#define mmSMU_IND_DATA_1 0x1a9
160#define mmSMU_IND_INDEX_2 0x1aa
161#define mmSMU_IND_DATA_2 0x1ab
162#define mmSMU_IND_INDEX_3 0x1ac
163#define mmSMU_IND_DATA_3 0x1ad
164#define mmSMU_IND_INDEX_4 0x1ae
165#define mmSMU_IND_DATA_4 0x1af
166#define mmSMU_IND_INDEX_5 0x1b0
167#define mmSMU_IND_DATA_5 0x1b1
168#define mmSMU_IND_INDEX_6 0x1b2
169#define mmSMU_IND_DATA_6 0x1b3
170#define mmSMU_IND_INDEX_7 0x1b4
171#define mmSMU_IND_DATA_7 0x1b5
172#define mmSMU_SMC_IND_INDEX 0x80
173#define mmSMU0_SMU_SMC_IND_INDEX 0x80
174#define mmSMU1_SMU_SMC_IND_INDEX 0x82
175#define mmSMU2_SMU_SMC_IND_INDEX 0x84
176#define mmSMU3_SMU_SMC_IND_INDEX 0x86
177#define mmSMU_SMC_IND_DATA 0x81
178#define mmSMU0_SMU_SMC_IND_DATA 0x81
179#define mmSMU1_SMU_SMC_IND_DATA 0x83
180#define mmSMU2_SMU_SMC_IND_DATA 0x85
181#define mmSMU3_SMU_SMC_IND_DATA 0x87
182#define ixRCU_UC_EVENTS 0xc0000004
183#define ixRCU_MISC_CTRL 0xc0000010
184#define ixRCU_VIRT_RESET_REQ 0xc0000024
185#define ixCC_RCU_FUSES 0xc00c0000
186#define ixCC_SMU_MISC_FUSES 0xc00c0004
187#define ixCC_SCLK_VID_FUSES 0xc00c0008
188#define ixCC_GIO_IOCCFG_FUSES 0xc00c000c
189#define ixCC_GIO_IOC_FUSES 0xc00c0010
190#define ixCC_SMU_TST_EFUSE1_MISC 0xc00c001c
191#define ixCC_TST_ID_STRAPS 0xc00c0020
192#define ixCC_FCTRL_FUSES 0xc00c0024
193#define ixCC_HARVEST_FUSES 0xc00c0028
194#define ixSMU_MAIN_PLL_OP_FREQ 0xe0003020
195#define ixSMU_STATUS 0xe0003088
196#define ixSMU_FIRMWARE 0xe00030a4
197#define ixSMU_INPUT_DATA 0xe00030b8
198#define ixSMU_EFUSE_0 0xc0100000
199#define ixFIRMWARE_FLAGS 0x3f000
200#define ixTDC_STATUS 0x3f004
201#define ixTDC_MV_AVERAGE 0x3f008
202#define ixTDC_VRM_LIMIT 0x3f00c
203#define ixFEATURE_STATUS 0x3f010
204#define ixENTITY_TEMPERATURES_1 0x3f014
205#define ixMCARB_DRAM_TIMING_TABLE_1 0x3f018
206#define ixMCARB_DRAM_TIMING_TABLE_2 0x3f01c
207#define ixMCARB_DRAM_TIMING_TABLE_3 0x3f020
208#define ixMCARB_DRAM_TIMING_TABLE_4 0x3f024
209#define ixMCARB_DRAM_TIMING_TABLE_5 0x3f028
210#define ixMCARB_DRAM_TIMING_TABLE_6 0x3f02c
211#define ixMCARB_DRAM_TIMING_TABLE_7 0x3f030
212#define ixMCARB_DRAM_TIMING_TABLE_8 0x3f034
213#define ixMCARB_DRAM_TIMING_TABLE_9 0x3f038
214#define ixMCARB_DRAM_TIMING_TABLE_10 0x3f03c
215#define ixMCARB_DRAM_TIMING_TABLE_11 0x3f040
216#define ixMCARB_DRAM_TIMING_TABLE_12 0x3f044
217#define ixMCARB_DRAM_TIMING_TABLE_13 0x3f048
218#define ixMCARB_DRAM_TIMING_TABLE_14 0x3f04c
219#define ixMCARB_DRAM_TIMING_TABLE_15 0x3f050
220#define ixMCARB_DRAM_TIMING_TABLE_16 0x3f054
221#define ixMCARB_DRAM_TIMING_TABLE_17 0x3f058
222#define ixMCARB_DRAM_TIMING_TABLE_18 0x3f05c
223#define ixMCARB_DRAM_TIMING_TABLE_19 0x3f060
224#define ixMCARB_DRAM_TIMING_TABLE_20 0x3f064
225#define ixMCARB_DRAM_TIMING_TABLE_21 0x3f068
226#define ixMCARB_DRAM_TIMING_TABLE_22 0x3f06c
227#define ixMCARB_DRAM_TIMING_TABLE_23 0x3f070
228#define ixMCARB_DRAM_TIMING_TABLE_24 0x3f074
229#define ixMCARB_DRAM_TIMING_TABLE_25 0x3f078
230#define ixMCARB_DRAM_TIMING_TABLE_26 0x3f07c
231#define ixMCARB_DRAM_TIMING_TABLE_27 0x3f080
232#define ixMCARB_DRAM_TIMING_TABLE_28 0x3f084
233#define ixMCARB_DRAM_TIMING_TABLE_29 0x3f088
234#define ixMCARB_DRAM_TIMING_TABLE_30 0x3f08c
235#define ixMCARB_DRAM_TIMING_TABLE_31 0x3f090
236#define ixMCARB_DRAM_TIMING_TABLE_32 0x3f094
237#define ixMCARB_DRAM_TIMING_TABLE_33 0x3f098
238#define ixMCARB_DRAM_TIMING_TABLE_34 0x3f09c
239#define ixMCARB_DRAM_TIMING_TABLE_35 0x3f0a0
240#define ixMCARB_DRAM_TIMING_TABLE_36 0x3f0a4
241#define ixMCARB_DRAM_TIMING_TABLE_37 0x3f0a8
242#define ixMCARB_DRAM_TIMING_TABLE_38 0x3f0ac
243#define ixMCARB_DRAM_TIMING_TABLE_39 0x3f0b0
244#define ixMCARB_DRAM_TIMING_TABLE_40 0x3f0b4
245#define ixMCARB_DRAM_TIMING_TABLE_41 0x3f0b8
246#define ixMCARB_DRAM_TIMING_TABLE_42 0x3f0bc
247#define ixMCARB_DRAM_TIMING_TABLE_43 0x3f0c0
248#define ixMCARB_DRAM_TIMING_TABLE_44 0x3f0c4
249#define ixMCARB_DRAM_TIMING_TABLE_45 0x3f0c8
250#define ixMCARB_DRAM_TIMING_TABLE_46 0x3f0cc
251#define ixMCARB_DRAM_TIMING_TABLE_47 0x3f0d0
252#define ixMCARB_DRAM_TIMING_TABLE_48 0x3f0d4
253#define ixMCARB_DRAM_TIMING_TABLE_49 0x3f0d8
254#define ixMCARB_DRAM_TIMING_TABLE_50 0x3f0dc
255#define ixMCARB_DRAM_TIMING_TABLE_51 0x3f0e0
256#define ixMCARB_DRAM_TIMING_TABLE_52 0x3f0e4
257#define ixMCARB_DRAM_TIMING_TABLE_53 0x3f0e8
258#define ixMCARB_DRAM_TIMING_TABLE_54 0x3f0ec
259#define ixMCARB_DRAM_TIMING_TABLE_55 0x3f0f0
260#define ixMCARB_DRAM_TIMING_TABLE_56 0x3f0f4
261#define ixMCARB_DRAM_TIMING_TABLE_57 0x3f0f8
262#define ixMCARB_DRAM_TIMING_TABLE_58 0x3f0fc
263#define ixMCARB_DRAM_TIMING_TABLE_59 0x3f100
264#define ixMCARB_DRAM_TIMING_TABLE_60 0x3f104
265#define ixMCARB_DRAM_TIMING_TABLE_61 0x3f108
266#define ixMCARB_DRAM_TIMING_TABLE_62 0x3f10c
267#define ixMCARB_DRAM_TIMING_TABLE_63 0x3f110
268#define ixMCARB_DRAM_TIMING_TABLE_64 0x3f114
269#define ixMCARB_DRAM_TIMING_TABLE_65 0x3f118
270#define ixMCARB_DRAM_TIMING_TABLE_66 0x3f11c
271#define ixMCARB_DRAM_TIMING_TABLE_67 0x3f120
272#define ixMCARB_DRAM_TIMING_TABLE_68 0x3f124
273#define ixMCARB_DRAM_TIMING_TABLE_69 0x3f128
274#define ixMCARB_DRAM_TIMING_TABLE_70 0x3f12c
275#define ixMCARB_DRAM_TIMING_TABLE_71 0x3f130
276#define ixMCARB_DRAM_TIMING_TABLE_72 0x3f134
277#define ixMCARB_DRAM_TIMING_TABLE_73 0x3f138
278#define ixMCARB_DRAM_TIMING_TABLE_74 0x3f13c
279#define ixMCARB_DRAM_TIMING_TABLE_75 0x3f140
280#define ixMCARB_DRAM_TIMING_TABLE_76 0x3f144
281#define ixMCARB_DRAM_TIMING_TABLE_77 0x3f148
282#define ixMCARB_DRAM_TIMING_TABLE_78 0x3f14c
283#define ixMCARB_DRAM_TIMING_TABLE_79 0x3f150
284#define ixMCARB_DRAM_TIMING_TABLE_80 0x3f154
285#define ixMCARB_DRAM_TIMING_TABLE_81 0x3f158
286#define ixMCARB_DRAM_TIMING_TABLE_82 0x3f15c
287#define ixMCARB_DRAM_TIMING_TABLE_83 0x3f160
288#define ixMCARB_DRAM_TIMING_TABLE_84 0x3f164
289#define ixMCARB_DRAM_TIMING_TABLE_85 0x3f168
290#define ixMCARB_DRAM_TIMING_TABLE_86 0x3f16c
291#define ixMCARB_DRAM_TIMING_TABLE_87 0x3f170
292#define ixMCARB_DRAM_TIMING_TABLE_88 0x3f174
293#define ixMCARB_DRAM_TIMING_TABLE_89 0x3f178
294#define ixMCARB_DRAM_TIMING_TABLE_90 0x3f17c
295#define ixMCARB_DRAM_TIMING_TABLE_91 0x3f180
296#define ixMCARB_DRAM_TIMING_TABLE_92 0x3f184
297#define ixMCARB_DRAM_TIMING_TABLE_93 0x3f188
298#define ixMCARB_DRAM_TIMING_TABLE_94 0x3f18c
299#define ixMCARB_DRAM_TIMING_TABLE_95 0x3f190
300#define ixMCARB_DRAM_TIMING_TABLE_96 0x3f194
301#define ixDPM_TABLE_1 0x3f198
302#define ixDPM_TABLE_2 0x3f19c
303#define ixDPM_TABLE_3 0x3f1a0
304#define ixDPM_TABLE_4 0x3f1a4
305#define ixDPM_TABLE_5 0x3f1a8
306#define ixDPM_TABLE_6 0x3f1ac
307#define ixDPM_TABLE_7 0x3f1b0
308#define ixDPM_TABLE_8 0x3f1b4
309#define ixDPM_TABLE_9 0x3f1b8
310#define ixDPM_TABLE_10 0x3f1bc
311#define ixDPM_TABLE_11 0x3f1c0
312#define ixDPM_TABLE_12 0x3f1c4
313#define ixDPM_TABLE_13 0x3f1c8
314#define ixDPM_TABLE_14 0x3f1cc
315#define ixDPM_TABLE_15 0x3f1d0
316#define ixDPM_TABLE_16 0x3f1d4
317#define ixDPM_TABLE_17 0x3f1d8
318#define ixDPM_TABLE_18 0x3f1dc
319#define ixDPM_TABLE_19 0x3f1e0
320#define ixDPM_TABLE_20 0x3f1e4
321#define ixDPM_TABLE_21 0x3f1e8
322#define ixDPM_TABLE_22 0x3f1ec
323#define ixDPM_TABLE_23 0x3f1f0
324#define ixDPM_TABLE_24 0x3f1f4
325#define ixDPM_TABLE_25 0x3f1f8
326#define ixDPM_TABLE_26 0x3f1fc
327#define ixDPM_TABLE_27 0x3f200
328#define ixDPM_TABLE_28 0x3f204
329#define ixDPM_TABLE_29 0x3f208
330#define ixDPM_TABLE_30 0x3f20c
331#define ixDPM_TABLE_31 0x3f210
332#define ixDPM_TABLE_32 0x3f214
333#define ixDPM_TABLE_33 0x3f218
334#define ixDPM_TABLE_34 0x3f21c
335#define ixDPM_TABLE_35 0x3f220
336#define ixDPM_TABLE_36 0x3f224
337#define ixDPM_TABLE_37 0x3f228
338#define ixDPM_TABLE_38 0x3f22c
339#define ixDPM_TABLE_39 0x3f230
340#define ixDPM_TABLE_40 0x3f234
341#define ixDPM_TABLE_41 0x3f238
342#define ixDPM_TABLE_42 0x3f23c
343#define ixDPM_TABLE_43 0x3f240
344#define ixDPM_TABLE_44 0x3f244
345#define ixDPM_TABLE_45 0x3f248
346#define ixDPM_TABLE_46 0x3f24c
347#define ixDPM_TABLE_47 0x3f250
348#define ixDPM_TABLE_48 0x3f254
349#define ixDPM_TABLE_49 0x3f258
350#define ixDPM_TABLE_50 0x3f25c
351#define ixDPM_TABLE_51 0x3f260
352#define ixDPM_TABLE_52 0x3f264
353#define ixDPM_TABLE_53 0x3f268
354#define ixDPM_TABLE_54 0x3f26c
355#define ixDPM_TABLE_55 0x3f270
356#define ixDPM_TABLE_56 0x3f274
357#define ixDPM_TABLE_57 0x3f278
358#define ixDPM_TABLE_58 0x3f27c
359#define ixDPM_TABLE_59 0x3f280
360#define ixDPM_TABLE_60 0x3f284
361#define ixDPM_TABLE_61 0x3f288
362#define ixDPM_TABLE_62 0x3f28c
363#define ixDPM_TABLE_63 0x3f290
364#define ixDPM_TABLE_64 0x3f294
365#define ixDPM_TABLE_65 0x3f298
366#define ixDPM_TABLE_66 0x3f29c
367#define ixDPM_TABLE_67 0x3f2a0
368#define ixDPM_TABLE_68 0x3f2a4
369#define ixDPM_TABLE_69 0x3f2a8
370#define ixDPM_TABLE_70 0x3f2ac
371#define ixDPM_TABLE_71 0x3f2b0
372#define ixDPM_TABLE_72 0x3f2b4
373#define ixDPM_TABLE_73 0x3f2b8
374#define ixDPM_TABLE_74 0x3f2bc
375#define ixDPM_TABLE_75 0x3f2c0
376#define ixDPM_TABLE_76 0x3f2c4
377#define ixDPM_TABLE_77 0x3f2c8
378#define ixDPM_TABLE_78 0x3f2cc
379#define ixDPM_TABLE_79 0x3f2d0
380#define ixDPM_TABLE_80 0x3f2d4
381#define ixDPM_TABLE_81 0x3f2d8
382#define ixDPM_TABLE_82 0x3f2dc
383#define ixDPM_TABLE_83 0x3f2e0
384#define ixDPM_TABLE_84 0x3f2e4
385#define ixDPM_TABLE_85 0x3f2e8
386#define ixDPM_TABLE_86 0x3f2ec
387#define ixDPM_TABLE_87 0x3f2f0
388#define ixDPM_TABLE_88 0x3f2f4
389#define ixDPM_TABLE_89 0x3f2f8
390#define ixDPM_TABLE_90 0x3f2fc
391#define ixDPM_TABLE_91 0x3f300
392#define ixDPM_TABLE_92 0x3f304
393#define ixDPM_TABLE_93 0x3f308
394#define ixDPM_TABLE_94 0x3f30c
395#define ixDPM_TABLE_95 0x3f310
396#define ixDPM_TABLE_96 0x3f314
397#define ixDPM_TABLE_97 0x3f318
398#define ixDPM_TABLE_98 0x3f31c
399#define ixDPM_TABLE_99 0x3f320
400#define ixDPM_TABLE_100 0x3f324
401#define ixDPM_TABLE_101 0x3f328
402#define ixDPM_TABLE_102 0x3f32c
403#define ixDPM_TABLE_103 0x3f330
404#define ixDPM_TABLE_104 0x3f334
405#define ixDPM_TABLE_105 0x3f338
406#define ixDPM_TABLE_106 0x3f33c
407#define ixDPM_TABLE_107 0x3f340
408#define ixDPM_TABLE_108 0x3f344
409#define ixDPM_TABLE_109 0x3f348
410#define ixDPM_TABLE_110 0x3f34c
411#define ixDPM_TABLE_111 0x3f350
412#define ixDPM_TABLE_112 0x3f354
413#define ixDPM_TABLE_113 0x3f358
414#define ixDPM_TABLE_114 0x3f35c
415#define ixDPM_TABLE_115 0x3f360
416#define ixDPM_TABLE_116 0x3f364
417#define ixDPM_TABLE_117 0x3f368
418#define ixDPM_TABLE_118 0x3f36c
419#define ixDPM_TABLE_119 0x3f370
420#define ixDPM_TABLE_120 0x3f374
421#define ixDPM_TABLE_121 0x3f378
422#define ixDPM_TABLE_122 0x3f37c
423#define ixDPM_TABLE_123 0x3f380
424#define ixDPM_TABLE_124 0x3f384
425#define ixDPM_TABLE_125 0x3f388
426#define ixDPM_TABLE_126 0x3f38c
427#define ixDPM_TABLE_127 0x3f390
428#define ixDPM_TABLE_128 0x3f394
429#define ixDPM_TABLE_129 0x3f398
430#define ixDPM_TABLE_130 0x3f39c
431#define ixDPM_TABLE_131 0x3f3a0
432#define ixDPM_TABLE_132 0x3f3a4
433#define ixDPM_TABLE_133 0x3f3a8
434#define ixDPM_TABLE_134 0x3f3ac
435#define ixDPM_TABLE_135 0x3f3b0
436#define ixDPM_TABLE_136 0x3f3b4
437#define ixDPM_TABLE_137 0x3f3b8
438#define ixDPM_TABLE_138 0x3f3bc
439#define ixDPM_TABLE_139 0x3f3c0
440#define ixDPM_TABLE_140 0x3f3c4
441#define ixDPM_TABLE_141 0x3f3c8
442#define ixDPM_TABLE_142 0x3f3cc
443#define ixDPM_TABLE_143 0x3f3d0
444#define ixDPM_TABLE_144 0x3f3d4
445#define ixDPM_TABLE_145 0x3f3d8
446#define ixDPM_TABLE_146 0x3f3dc
447#define ixDPM_TABLE_147 0x3f3e0
448#define ixDPM_TABLE_148 0x3f3e4
449#define ixDPM_TABLE_149 0x3f3e8
450#define ixDPM_TABLE_150 0x3f3ec
451#define ixDPM_TABLE_151 0x3f3f0
452#define ixDPM_TABLE_152 0x3f3f4
453#define ixDPM_TABLE_153 0x3f3f8
454#define ixDPM_TABLE_154 0x3f3fc
455#define ixDPM_TABLE_155 0x3f400
456#define ixDPM_TABLE_156 0x3f404
457#define ixDPM_TABLE_157 0x3f408
458#define ixDPM_TABLE_158 0x3f40c
459#define ixDPM_TABLE_159 0x3f410
460#define ixDPM_TABLE_160 0x3f414
461#define ixDPM_TABLE_161 0x3f418
462#define ixDPM_TABLE_162 0x3f41c
463#define ixDPM_TABLE_163 0x3f420
464#define ixDPM_TABLE_164 0x3f424
465#define ixDPM_TABLE_165 0x3f428
466#define ixDPM_TABLE_166 0x3f42c
467#define ixDPM_TABLE_167 0x3f430
468#define ixDPM_TABLE_168 0x3f434
469#define ixDPM_TABLE_169 0x3f438
470#define ixDPM_TABLE_170 0x3f43c
471#define ixDPM_TABLE_171 0x3f440
472#define ixDPM_TABLE_172 0x3f444
473#define ixDPM_TABLE_173 0x3f448
474#define ixDPM_TABLE_174 0x3f44c
475#define ixDPM_TABLE_175 0x3f450
476#define ixDPM_TABLE_176 0x3f454
477#define ixDPM_TABLE_177 0x3f458
478#define ixDPM_TABLE_178 0x3f45c
479#define ixDPM_TABLE_179 0x3f460
480#define ixDPM_TABLE_180 0x3f464
481#define ixDPM_TABLE_181 0x3f468
482#define ixDPM_TABLE_182 0x3f46c
483#define ixDPM_TABLE_183 0x3f470
484#define ixDPM_TABLE_184 0x3f474
485#define ixDPM_TABLE_185 0x3f478
486#define ixDPM_TABLE_186 0x3f47c
487#define ixDPM_TABLE_187 0x3f480
488#define ixDPM_TABLE_188 0x3f484
489#define ixDPM_TABLE_189 0x3f488
490#define ixDPM_TABLE_190 0x3f48c
491#define ixDPM_TABLE_191 0x3f490
492#define ixDPM_TABLE_192 0x3f494
493#define ixDPM_TABLE_193 0x3f498
494#define ixDPM_TABLE_194 0x3f49c
495#define ixDPM_TABLE_195 0x3f4a0
496#define ixDPM_TABLE_196 0x3f4a4
497#define ixDPM_TABLE_197 0x3f4a8
498#define ixDPM_TABLE_198 0x3f4ac
499#define ixDPM_TABLE_199 0x3f4b0
500#define ixDPM_TABLE_200 0x3f4b4
501#define ixDPM_TABLE_201 0x3f4b8
502#define ixDPM_TABLE_202 0x3f4bc
503#define ixDPM_TABLE_203 0x3f4c0
504#define ixDPM_TABLE_204 0x3f4c4
505#define ixDPM_TABLE_205 0x3f4c8
506#define ixDPM_TABLE_206 0x3f4cc
507#define ixDPM_TABLE_207 0x3f4d0
508#define ixDPM_TABLE_208 0x3f4d4
509#define ixDPM_TABLE_209 0x3f4d8
510#define ixDPM_TABLE_210 0x3f4dc
511#define ixDPM_TABLE_211 0x3f4e0
512#define ixDPM_TABLE_212 0x3f4e4
513#define ixDPM_TABLE_213 0x3f4e8
514#define ixDPM_TABLE_214 0x3f4ec
515#define ixDPM_TABLE_215 0x3f4f0
516#define ixDPM_TABLE_216 0x3f4f4
517#define ixDPM_TABLE_217 0x3f4f8
518#define ixDPM_TABLE_218 0x3f4fc
519#define ixDPM_TABLE_219 0x3f500
520#define ixDPM_TABLE_220 0x3f504
521#define ixDPM_TABLE_221 0x3f508
522#define ixDPM_TABLE_222 0x3f50c
523#define ixDPM_TABLE_223 0x3f510
524#define ixDPM_TABLE_224 0x3f514
525#define ixDPM_TABLE_225 0x3f518
526#define ixDPM_TABLE_226 0x3f51c
527#define ixDPM_TABLE_227 0x3f520
528#define ixDPM_TABLE_228 0x3f524
529#define ixDPM_TABLE_229 0x3f528
530#define ixDPM_TABLE_230 0x3f52c
531#define ixDPM_TABLE_231 0x3f530
532#define ixDPM_TABLE_232 0x3f534
533#define ixDPM_TABLE_233 0x3f538
534#define ixDPM_TABLE_234 0x3f53c
535#define ixDPM_TABLE_235 0x3f540
536#define ixDPM_TABLE_236 0x3f544
537#define ixDPM_TABLE_237 0x3f548
538#define ixDPM_TABLE_238 0x3f54c
539#define ixDPM_TABLE_239 0x3f550
540#define ixDPM_TABLE_240 0x3f554
541#define ixDPM_TABLE_241 0x3f558
542#define ixDPM_TABLE_242 0x3f55c
543#define ixDPM_TABLE_243 0x3f560
544#define ixDPM_TABLE_244 0x3f564
545#define ixDPM_TABLE_245 0x3f568
546#define ixDPM_TABLE_246 0x3f56c
547#define ixDPM_TABLE_247 0x3f570
548#define ixDPM_TABLE_248 0x3f574
549#define ixDPM_TABLE_249 0x3f578
550#define ixDPM_TABLE_250 0x3f57c
551#define ixDPM_TABLE_251 0x3f580
552#define ixDPM_TABLE_252 0x3f584
553#define ixDPM_TABLE_253 0x3f588
554#define ixDPM_TABLE_254 0x3f58c
555#define ixDPM_TABLE_255 0x3f590
556#define ixDPM_TABLE_256 0x3f594
557#define ixDPM_TABLE_257 0x3f598
558#define ixDPM_TABLE_258 0x3f59c
559#define ixDPM_TABLE_259 0x3f5a0
560#define ixDPM_TABLE_260 0x3f5a4
561#define ixDPM_TABLE_261 0x3f5a8
562#define ixDPM_TABLE_262 0x3f5ac
563#define ixDPM_TABLE_263 0x3f5b0
564#define ixDPM_TABLE_264 0x3f5b4
565#define ixDPM_TABLE_265 0x3f5b8
566#define ixDPM_TABLE_266 0x3f5bc
567#define ixDPM_TABLE_267 0x3f5c0
568#define ixDPM_TABLE_268 0x3f5c4
569#define ixDPM_TABLE_269 0x3f5c8
570#define ixDPM_TABLE_270 0x3f5cc
571#define ixDPM_TABLE_271 0x3f5d0
572#define ixDPM_TABLE_272 0x3f5d4
573#define ixDPM_TABLE_273 0x3f5d8
574#define ixDPM_TABLE_274 0x3f5dc
575#define ixDPM_TABLE_275 0x3f5e0
576#define ixDPM_TABLE_276 0x3f5e4
577#define ixDPM_TABLE_277 0x3f5e8
578#define ixDPM_TABLE_278 0x3f5ec
579#define ixDPM_TABLE_279 0x3f5f0
580#define ixDPM_TABLE_280 0x3f5f4
581#define ixDPM_TABLE_281 0x3f5f8
582#define ixDPM_TABLE_282 0x3f5fc
583#define ixDPM_TABLE_283 0x3f600
584#define ixDPM_TABLE_284 0x3f604
585#define ixDPM_TABLE_285 0x3f608
586#define ixDPM_TABLE_286 0x3f60c
587#define ixDPM_TABLE_287 0x3f610
588#define ixDPM_TABLE_288 0x3f614
589#define ixDPM_TABLE_289 0x3f618
590#define ixDPM_TABLE_290 0x3f61c
591#define ixDPM_TABLE_291 0x3f620
592#define ixDPM_TABLE_292 0x3f624
593#define ixDPM_TABLE_293 0x3f628
594#define ixDPM_TABLE_294 0x3f62c
595#define ixDPM_TABLE_295 0x3f630
596#define ixDPM_TABLE_296 0x3f634
597#define ixDPM_TABLE_297 0x3f638
598#define ixDPM_TABLE_298 0x3f63c
599#define ixDPM_TABLE_299 0x3f640
600#define ixDPM_TABLE_300 0x3f644
601#define ixDPM_TABLE_301 0x3f648
602#define ixDPM_TABLE_302 0x3f64c
603#define ixDPM_TABLE_303 0x3f650
604#define ixDPM_TABLE_304 0x3f654
605#define ixDPM_TABLE_305 0x3f658
606#define ixDPM_TABLE_306 0x3f65c
607#define ixDPM_TABLE_307 0x3f660
608#define ixDPM_TABLE_308 0x3f664
609#define ixDPM_TABLE_309 0x3f668
610#define ixDPM_TABLE_310 0x3f66c
611#define ixDPM_TABLE_311 0x3f670
612#define ixDPM_TABLE_312 0x3f674
613#define ixDPM_TABLE_313 0x3f678
614#define ixDPM_TABLE_314 0x3f67c
615#define ixDPM_TABLE_315 0x3f680
616#define ixDPM_TABLE_316 0x3f684
617#define ixDPM_TABLE_317 0x3f688
618#define ixDPM_TABLE_318 0x3f68c
619#define ixDPM_TABLE_319 0x3f690
620#define ixDPM_TABLE_320 0x3f694
621#define ixDPM_TABLE_321 0x3f698
622#define ixDPM_TABLE_322 0x3f69c
623#define ixDPM_TABLE_323 0x3f6a0
624#define ixDPM_TABLE_324 0x3f6a4
625#define ixDPM_TABLE_325 0x3f6a8
626#define ixDPM_TABLE_326 0x3f6ac
627#define ixDPM_TABLE_327 0x3f6b0
628#define ixDPM_TABLE_328 0x3f6b4
629#define ixDPM_TABLE_329 0x3f6b8
630#define ixDPM_TABLE_330 0x3f6bc
631#define ixDPM_TABLE_331 0x3f6c0
632#define ixDPM_TABLE_332 0x3f6c4
633#define ixDPM_TABLE_333 0x3f6c8
634#define ixDPM_TABLE_334 0x3f6cc
635#define ixDPM_TABLE_335 0x3f6d0
636#define ixDPM_TABLE_336 0x3f6d4
637#define ixDPM_TABLE_337 0x3f6d8
638#define ixDPM_TABLE_338 0x3f6dc
639#define ixDPM_TABLE_339 0x3f6e0
640#define ixDPM_TABLE_340 0x3f6e4
641#define ixDPM_TABLE_341 0x3f6e8
642#define ixDPM_TABLE_342 0x3f6ec
643#define ixDPM_TABLE_343 0x3f6f0
644#define ixDPM_TABLE_344 0x3f6f4
645#define ixDPM_TABLE_345 0x3f6f8
646#define ixDPM_TABLE_346 0x3f6fc
647#define ixDPM_TABLE_347 0x3f700
648#define ixDPM_TABLE_348 0x3f704
649#define ixDPM_TABLE_349 0x3f708
650#define ixDPM_TABLE_350 0x3f70c
651#define ixDPM_TABLE_351 0x3f710
652#define ixDPM_TABLE_352 0x3f714
653#define ixDPM_TABLE_353 0x3f718
654#define ixDPM_TABLE_354 0x3f71c
655#define ixDPM_TABLE_355 0x3f720
656#define ixDPM_TABLE_356 0x3f724
657#define ixDPM_TABLE_357 0x3f728
658#define ixDPM_TABLE_358 0x3f72c
659#define ixDPM_TABLE_359 0x3f730
660#define ixDPM_TABLE_360 0x3f734
661#define ixDPM_TABLE_361 0x3f738
662#define ixDPM_TABLE_362 0x3f73c
663#define ixDPM_TABLE_363 0x3f740
664#define ixDPM_TABLE_364 0x3f744
665#define ixDPM_TABLE_365 0x3f748
666#define ixDPM_TABLE_366 0x3f74c
667#define ixDPM_TABLE_367 0x3f750
668#define ixDPM_TABLE_368 0x3f754
669#define ixDPM_TABLE_369 0x3f758
670#define ixDPM_TABLE_370 0x3f75c
671#define ixDPM_TABLE_371 0x3f760
672#define ixDPM_TABLE_372 0x3f764
673#define ixDPM_TABLE_373 0x3f768
674#define ixDPM_TABLE_374 0x3f76c
675#define ixDPM_TABLE_375 0x3f770
676#define ixDPM_TABLE_376 0x3f774
677#define ixDPM_TABLE_377 0x3f778
678#define ixDPM_TABLE_378 0x3f77c
679#define ixDPM_TABLE_379 0x3f780
680#define ixDPM_TABLE_380 0x3f784
681#define ixDPM_TABLE_381 0x3f788
682#define ixDPM_TABLE_382 0x3f78c
683#define ixDPM_TABLE_383 0x3f790
684#define ixDPM_TABLE_384 0x3f794
685#define ixDPM_TABLE_385 0x3f798
686#define ixDPM_TABLE_386 0x3f79c
687#define ixDPM_TABLE_387 0x3f7a0
688#define ixDPM_TABLE_388 0x3f7a4
689#define ixDPM_TABLE_389 0x3f7a8
690#define ixDPM_TABLE_390 0x3f7ac
691#define ixDPM_TABLE_391 0x3f7b0
692#define ixDPM_TABLE_392 0x3f7b4
693#define ixDPM_TABLE_393 0x3f7b8
694#define ixDPM_TABLE_394 0x3f7bc
695#define ixDPM_TABLE_395 0x3f7c0
696#define ixDPM_TABLE_396 0x3f7c4
697#define ixDPM_TABLE_397 0x3f7c8
698#define ixDPM_TABLE_398 0x3f7cc
699#define ixDPM_TABLE_399 0x3f7d0
700#define ixDPM_TABLE_400 0x3f7d4
701#define ixDPM_TABLE_401 0x3f7d8
702#define ixDPM_TABLE_402 0x3f7dc
703#define ixDPM_TABLE_403 0x3f7e0
704#define ixDPM_TABLE_404 0x3f7e4
705#define ixDPM_TABLE_405 0x3f7e8
706#define ixDPM_TABLE_406 0x3f7ec
707#define ixDPM_TABLE_407 0x3f7f0
708#define ixDPM_TABLE_408 0x3f7f4
709#define ixDPM_TABLE_409 0x3f7f8
710#define ixDPM_TABLE_410 0x3f7fc
711#define ixDPM_TABLE_411 0x3f800
712#define ixDPM_TABLE_412 0x3f804
713#define ixDPM_TABLE_413 0x3f808
714#define ixDPM_TABLE_414 0x3f80c
715#define ixDPM_TABLE_415 0x3f810
716#define ixDPM_TABLE_416 0x3f814
717#define ixDPM_TABLE_417 0x3f818
718#define ixDPM_TABLE_418 0x3f81c
719#define ixDPM_TABLE_419 0x3f820
720#define ixDPM_TABLE_420 0x3f824
721#define ixDPM_TABLE_421 0x3f828
722#define ixDPM_TABLE_422 0x3f82c
723#define ixDPM_TABLE_423 0x3f830
724#define ixDPM_TABLE_424 0x3f834
725#define ixDPM_TABLE_425 0x3f838
726#define ixDPM_TABLE_426 0x3f83c
727#define ixDPM_TABLE_427 0x3f840
728#define ixDPM_TABLE_428 0x3f844
729#define ixDPM_TABLE_429 0x3f848
730#define ixDPM_TABLE_430 0x3f84c
731#define ixDPM_TABLE_431 0x3f850
732#define ixDPM_TABLE_432 0x3f854
733#define ixDPM_TABLE_433 0x3f858
734#define ixDPM_TABLE_434 0x3f85c
735#define ixDPM_TABLE_435 0x3f860
736#define ixDPM_TABLE_436 0x3f864
737#define ixDPM_TABLE_437 0x3f868
738#define ixDPM_TABLE_438 0x3f86c
739#define ixDPM_TABLE_439 0x3f870
740#define ixDPM_TABLE_440 0x3f874
741#define ixSOFT_REGISTERS_TABLE_1 0x3f89c
742#define ixSOFT_REGISTERS_TABLE_2 0x3f8a0
743#define ixSOFT_REGISTERS_TABLE_3 0x3f8a4
744#define ixSOFT_REGISTERS_TABLE_4 0x3f8a8
745#define ixSOFT_REGISTERS_TABLE_5 0x3f8ac
746#define ixSOFT_REGISTERS_TABLE_6 0x3f8b0
747#define ixSOFT_REGISTERS_TABLE_7 0x3f8b4
748#define ixSOFT_REGISTERS_TABLE_8 0x3f8b8
749#define ixSOFT_REGISTERS_TABLE_9 0x3f8bc
750#define ixSOFT_REGISTERS_TABLE_10 0x3f8c0
751#define ixSOFT_REGISTERS_TABLE_11 0x3f8c4
752#define ixSOFT_REGISTERS_TABLE_12 0x3f8c8
753#define ixSOFT_REGISTERS_TABLE_13 0x3f8cc
754#define ixSOFT_REGISTERS_TABLE_14 0x3f8d0
755#define ixSOFT_REGISTERS_TABLE_15 0x3f8d4
756#define ixSOFT_REGISTERS_TABLE_16 0x3f8d8
757#define ixSOFT_REGISTERS_TABLE_17 0x3f8dc
758#define ixSOFT_REGISTERS_TABLE_18 0x3f8e0
759#define ixSOFT_REGISTERS_TABLE_19 0x3f8e4
760#define ixSOFT_REGISTERS_TABLE_20 0x3f8e8
761#define ixSOFT_REGISTERS_TABLE_21 0x3f8ec
762#define ixSOFT_REGISTERS_TABLE_22 0x3f8f0
763#define ixSOFT_REGISTERS_TABLE_23 0x3f8f4
764#define ixSOFT_REGISTERS_TABLE_24 0x3f8f8
765#define ixSOFT_REGISTERS_TABLE_25 0x3f8fc
766#define ixSOFT_REGISTERS_TABLE_26 0x3f900
767#define ixSOFT_REGISTERS_TABLE_27 0x3f904
768#define ixSOFT_REGISTERS_TABLE_28 0x3f888
769#define ixSOFT_REGISTERS_TABLE_29 0x3f90c
770#define ixSOFT_REGISTERS_TABLE_30 0x3f910
771#define ixPM_FUSES_1 0x3f914
772#define ixPM_FUSES_2 0x3f918
773#define ixPM_FUSES_3 0x3f91c
774#define ixPM_FUSES_4 0x3f920
775#define ixPM_FUSES_5 0x3f924
776#define ixPM_FUSES_6 0x3f928
777#define ixPM_FUSES_7 0x3f92c
778#define ixPM_FUSES_8 0x3f930
779#define ixPM_FUSES_9 0x3f934
780#define ixPM_FUSES_10 0x3f938
781#define ixPM_FUSES_11 0x3f93c
782#define ixPM_FUSES_12 0x3f940
783#define ixPM_FUSES_13 0x3f944
784#define ixPM_FUSES_14 0x3f948
785#define ixPM_FUSES_15 0x3f94c
786#define ixSMU_PM_STATUS_0 0x3fe00
787#define ixSMU_PM_STATUS_1 0x3fe04
788#define ixSMU_PM_STATUS_2 0x3fe08
789#define ixSMU_PM_STATUS_3 0x3fe0c
790#define ixSMU_PM_STATUS_4 0x3fe10
791#define ixSMU_PM_STATUS_5 0x3fe14
792#define ixSMU_PM_STATUS_6 0x3fe18
793#define ixSMU_PM_STATUS_7 0x3fe1c
794#define ixSMU_PM_STATUS_8 0x3fe20
795#define ixSMU_PM_STATUS_9 0x3fe24
796#define ixSMU_PM_STATUS_10 0x3fe28
797#define ixSMU_PM_STATUS_11 0x3fe2c
798#define ixSMU_PM_STATUS_12 0x3fe30
799#define ixSMU_PM_STATUS_13 0x3fe34
800#define ixSMU_PM_STATUS_14 0x3fe38
801#define ixSMU_PM_STATUS_15 0x3fe3c
802#define ixSMU_PM_STATUS_16 0x3fe40
803#define ixSMU_PM_STATUS_17 0x3fe44
804#define ixSMU_PM_STATUS_18 0x3fe48
805#define ixSMU_PM_STATUS_19 0x3fe4c
806#define ixSMU_PM_STATUS_20 0x3fe50
807#define ixSMU_PM_STATUS_21 0x3fe54
808#define ixSMU_PM_STATUS_22 0x3fe58
809#define ixSMU_PM_STATUS_23 0x3fe5c
810#define ixSMU_PM_STATUS_24 0x3fe60
811#define ixSMU_PM_STATUS_25 0x3fe64
812#define ixSMU_PM_STATUS_26 0x3fe68
813#define ixSMU_PM_STATUS_27 0x3fe6c
814#define ixSMU_PM_STATUS_28 0x3fe70
815#define ixSMU_PM_STATUS_29 0x3fe74
816#define ixSMU_PM_STATUS_30 0x3fe78
817#define ixSMU_PM_STATUS_31 0x3fe7c
818#define ixSMU_PM_STATUS_32 0x3fe80
819#define ixSMU_PM_STATUS_33 0x3fe84
820#define ixSMU_PM_STATUS_34 0x3fe88
821#define ixSMU_PM_STATUS_35 0x3fe8c
822#define ixSMU_PM_STATUS_36 0x3fe90
823#define ixSMU_PM_STATUS_37 0x3fe94
824#define ixSMU_PM_STATUS_38 0x3fe98
825#define ixSMU_PM_STATUS_39 0x3fe9c
826#define ixSMU_PM_STATUS_40 0x3fea0
827#define ixSMU_PM_STATUS_41 0x3fea4
828#define ixSMU_PM_STATUS_42 0x3fea8
829#define ixSMU_PM_STATUS_43 0x3feac
830#define ixSMU_PM_STATUS_44 0x3feb0
831#define ixSMU_PM_STATUS_45 0x3feb4
832#define ixSMU_PM_STATUS_46 0x3feb8
833#define ixSMU_PM_STATUS_47 0x3febc
834#define ixSMU_PM_STATUS_48 0x3fec0
835#define ixSMU_PM_STATUS_49 0x3fec4
836#define ixSMU_PM_STATUS_50 0x3fec8
837#define ixSMU_PM_STATUS_51 0x3fecc
838#define ixSMU_PM_STATUS_52 0x3fed0
839#define ixSMU_PM_STATUS_53 0x3fed4
840#define ixSMU_PM_STATUS_54 0x3fed8
841#define ixSMU_PM_STATUS_55 0x3fedc
842#define ixSMU_PM_STATUS_56 0x3fee0
843#define ixSMU_PM_STATUS_57 0x3fee4
844#define ixSMU_PM_STATUS_58 0x3fee8
845#define ixSMU_PM_STATUS_59 0x3feec
846#define ixSMU_PM_STATUS_60 0x3fef0
847#define ixSMU_PM_STATUS_61 0x3fef4
848#define ixSMU_PM_STATUS_62 0x3fef8
849#define ixSMU_PM_STATUS_63 0x3fefc
850#define ixSMU_PM_STATUS_64 0x3ff00
851#define ixSMU_PM_STATUS_65 0x3ff04
852#define ixSMU_PM_STATUS_66 0x3ff08
853#define ixSMU_PM_STATUS_67 0x3ff0c
854#define ixSMU_PM_STATUS_68 0x3ff10
855#define ixSMU_PM_STATUS_69 0x3ff14
856#define ixSMU_PM_STATUS_70 0x3ff18
857#define ixSMU_PM_STATUS_71 0x3ff1c
858#define ixSMU_PM_STATUS_72 0x3ff20
859#define ixSMU_PM_STATUS_73 0x3ff24
860#define ixSMU_PM_STATUS_74 0x3ff28
861#define ixSMU_PM_STATUS_75 0x3ff2c
862#define ixSMU_PM_STATUS_76 0x3ff30
863#define ixSMU_PM_STATUS_77 0x3ff34
864#define ixSMU_PM_STATUS_78 0x3ff38
865#define ixSMU_PM_STATUS_79 0x3ff3c
866#define ixSMU_PM_STATUS_80 0x3ff40
867#define ixSMU_PM_STATUS_81 0x3ff44
868#define ixSMU_PM_STATUS_82 0x3ff48
869#define ixSMU_PM_STATUS_83 0x3ff4c
870#define ixSMU_PM_STATUS_84 0x3ff50
871#define ixSMU_PM_STATUS_85 0x3ff54
872#define ixSMU_PM_STATUS_86 0x3ff58
873#define ixSMU_PM_STATUS_87 0x3ff5c
874#define ixSMU_PM_STATUS_88 0x3ff60
875#define ixSMU_PM_STATUS_89 0x3ff64
876#define ixSMU_PM_STATUS_90 0x3ff68
877#define ixSMU_PM_STATUS_91 0x3ff6c
878#define ixSMU_PM_STATUS_92 0x3ff70
879#define ixSMU_PM_STATUS_93 0x3ff74
880#define ixSMU_PM_STATUS_94 0x3ff78
881#define ixSMU_PM_STATUS_95 0x3ff7c
882#define ixSMU_PM_STATUS_96 0x3ff80
883#define ixSMU_PM_STATUS_97 0x3ff84
884#define ixSMU_PM_STATUS_98 0x3ff88
885#define ixSMU_PM_STATUS_99 0x3ff8c
886#define ixSMU_PM_STATUS_100 0x3ff90
887#define ixSMU_PM_STATUS_101 0x3ff94
888#define ixSMU_PM_STATUS_102 0x3ff98
889#define ixSMU_PM_STATUS_103 0x3ff9c
890#define ixSMU_PM_STATUS_104 0x3ffa0
891#define ixSMU_PM_STATUS_105 0x3ffa4
892#define ixSMU_PM_STATUS_106 0x3ffa8
893#define ixSMU_PM_STATUS_107 0x3ffac
894#define ixSMU_PM_STATUS_108 0x3ffb0
895#define ixSMU_PM_STATUS_109 0x3ffb4
896#define ixSMU_PM_STATUS_110 0x3ffb8
897#define ixSMU_PM_STATUS_111 0x3ffbc
898#define ixSMU_PM_STATUS_112 0x3ffc0
899#define ixSMU_PM_STATUS_113 0x3ffc4
900#define ixSMU_PM_STATUS_114 0x3ffc8
901#define ixSMU_PM_STATUS_115 0x3ffcc
902#define ixSMU_PM_STATUS_116 0x3ffd0
903#define ixSMU_PM_STATUS_117 0x3ffd4
904#define ixSMU_PM_STATUS_118 0x3ffd8
905#define ixSMU_PM_STATUS_119 0x3ffdc
906#define ixSMU_PM_STATUS_120 0x3ffe0
907#define ixSMU_PM_STATUS_121 0x3ffe4
908#define ixSMU_PM_STATUS_122 0x3ffe8
909#define ixSMU_PM_STATUS_123 0x3ffec
910#define ixSMU_PM_STATUS_124 0x3fff0
911#define ixSMU_PM_STATUS_125 0x3fff4
912#define ixSMU_PM_STATUS_126 0x3fff8
913#define ixSMU_PM_STATUS_127 0x3fffc
914#define ixCG_THERMAL_INT_ENA 0xc2100024
915#define ixCG_THERMAL_INT_CTRL 0xc2100028
916#define ixCG_THERMAL_INT_STATUS 0xc210002c
917#define ixCG_THERMAL_CTRL 0xc0300004
918#define ixCG_THERMAL_STATUS 0xc0300008
919#define ixCG_THERMAL_INT 0xc030000c
920#define ixCG_MULT_THERMAL_CTRL 0xc0300010
921#define ixCG_MULT_THERMAL_STATUS 0xc0300014
922#define ixTHM_TMON2_CTRL 0xc0300034
923#define ixTHM_TMON2_CTRL2 0xc0300038
924#define ixTHM_TMON2_CSR_WR 0xc0300054
925#define ixTHM_TMON2_CSR_RD 0xc0300058
926#define ixCG_FDO_CTRL0 0xc0300064
927#define ixCG_FDO_CTRL1 0xc0300068
928#define ixCG_FDO_CTRL2 0xc030006c
929#define ixCG_TACH_CTRL 0xc0300070
930#define ixCG_TACH_STATUS 0xc0300074
931#define ixCC_THM_STRAPS0 0xc0300080
932#define ixTHM_TMON0_RDIL0_DATA 0xc0300100
933#define ixTHM_TMON0_RDIL1_DATA 0xc0300104
934#define ixTHM_TMON0_RDIL2_DATA 0xc0300108
935#define ixTHM_TMON0_RDIL3_DATA 0xc030010c
936#define ixTHM_TMON0_RDIL4_DATA 0xc0300110
937#define ixTHM_TMON0_RDIL5_DATA 0xc0300114
938#define ixTHM_TMON0_RDIL6_DATA 0xc0300118
939#define ixTHM_TMON0_RDIL7_DATA 0xc030011c
940#define ixTHM_TMON0_RDIL8_DATA 0xc0300120
941#define ixTHM_TMON0_RDIL9_DATA 0xc0300124
942#define ixTHM_TMON0_RDIL10_DATA 0xc0300128
943#define ixTHM_TMON0_RDIL11_DATA 0xc030012c
944#define ixTHM_TMON0_RDIL12_DATA 0xc0300130
945#define ixTHM_TMON0_RDIL13_DATA 0xc0300134
946#define ixTHM_TMON0_RDIL14_DATA 0xc0300138
947#define ixTHM_TMON0_RDIL15_DATA 0xc030013c
948#define ixTHM_TMON0_RDIR0_DATA 0xc0300140
949#define ixTHM_TMON0_RDIR1_DATA 0xc0300144
950#define ixTHM_TMON0_RDIR2_DATA 0xc0300148
951#define ixTHM_TMON0_RDIR3_DATA 0xc030014c
952#define ixTHM_TMON0_RDIR4_DATA 0xc0300150
953#define ixTHM_TMON0_RDIR5_DATA 0xc0300154
954#define ixTHM_TMON0_RDIR6_DATA 0xc0300158
955#define ixTHM_TMON0_RDIR7_DATA 0xc030015c
956#define ixTHM_TMON0_RDIR8_DATA 0xc0300160
957#define ixTHM_TMON0_RDIR9_DATA 0xc0300164
958#define ixTHM_TMON0_RDIR10_DATA 0xc0300168
959#define ixTHM_TMON0_RDIR11_DATA 0xc030016c
960#define ixTHM_TMON0_RDIR12_DATA 0xc0300170
961#define ixTHM_TMON0_RDIR13_DATA 0xc0300174
962#define ixTHM_TMON0_RDIR14_DATA 0xc0300178
963#define ixTHM_TMON0_RDIR15_DATA 0xc030017c
964#define ixTHM_TMON1_RDIL0_DATA 0xc0300180
965#define ixTHM_TMON1_RDIL1_DATA 0xc0300184
966#define ixTHM_TMON1_RDIL2_DATA 0xc0300188
967#define ixTHM_TMON1_RDIL3_DATA 0xc030018c
968#define ixTHM_TMON1_RDIL4_DATA 0xc0300190
969#define ixTHM_TMON1_RDIL5_DATA 0xc0300194
970#define ixTHM_TMON1_RDIL6_DATA 0xc0300198
971#define ixTHM_TMON1_RDIL7_DATA 0xc030019c
972#define ixTHM_TMON1_RDIL8_DATA 0xc03001a0
973#define ixTHM_TMON1_RDIL9_DATA 0xc03001a4
974#define ixTHM_TMON1_RDIL10_DATA 0xc03001a8
975#define ixTHM_TMON1_RDIL11_DATA 0xc03001ac
976#define ixTHM_TMON1_RDIL12_DATA 0xc03001b0
977#define ixTHM_TMON1_RDIL13_DATA 0xc03001b4
978#define ixTHM_TMON1_RDIL14_DATA 0xc03001b8
979#define ixTHM_TMON1_RDIL15_DATA 0xc03001bc
980#define ixTHM_TMON1_RDIR0_DATA 0xc03001c0
981#define ixTHM_TMON1_RDIR1_DATA 0xc03001c4
982#define ixTHM_TMON1_RDIR2_DATA 0xc03001c8
983#define ixTHM_TMON1_RDIR3_DATA 0xc03001cc
984#define ixTHM_TMON1_RDIR4_DATA 0xc03001d0
985#define ixTHM_TMON1_RDIR5_DATA 0xc03001d4
986#define ixTHM_TMON1_RDIR6_DATA 0xc03001d8
987#define ixTHM_TMON1_RDIR7_DATA 0xc03001dc
988#define ixTHM_TMON1_RDIR8_DATA 0xc03001e0
989#define ixTHM_TMON1_RDIR9_DATA 0xc03001e4
990#define ixTHM_TMON1_RDIR10_DATA 0xc03001e8
991#define ixTHM_TMON1_RDIR11_DATA 0xc03001ec
992#define ixTHM_TMON1_RDIR12_DATA 0xc03001f0
993#define ixTHM_TMON1_RDIR13_DATA 0xc03001f4
994#define ixTHM_TMON1_RDIR14_DATA 0xc03001f8
995#define ixTHM_TMON1_RDIR15_DATA 0xc03001fc
996#define ixTHM_TMON2_RDIL0_DATA 0xc0300200
997#define ixTHM_TMON2_RDIL1_DATA 0xc0300204
998#define ixTHM_TMON2_RDIL2_DATA 0xc0300208
999#define ixTHM_TMON2_RDIL3_DATA 0xc030020c
1000#define ixTHM_TMON2_RDIL4_DATA 0xc0300210
1001#define ixTHM_TMON2_RDIL5_DATA 0xc0300214
1002#define ixTHM_TMON2_RDIL6_DATA 0xc0300218
1003#define ixTHM_TMON2_RDIL7_DATA 0xc030021c
1004#define ixTHM_TMON2_RDIL8_DATA 0xc0300220
1005#define ixTHM_TMON2_RDIL9_DATA 0xc0300224
1006#define ixTHM_TMON2_RDIL10_DATA 0xc0300228
1007#define ixTHM_TMON2_RDIL11_DATA 0xc030022c
1008#define ixTHM_TMON2_RDIL12_DATA 0xc0300230
1009#define ixTHM_TMON2_RDIL13_DATA 0xc0300234
1010#define ixTHM_TMON2_RDIL14_DATA 0xc0300238
1011#define ixTHM_TMON2_RDIL15_DATA 0xc030023c
1012#define ixTHM_TMON2_RDIR0_DATA 0xc0300240
1013#define ixTHM_TMON2_RDIR1_DATA 0xc0300244
1014#define ixTHM_TMON2_RDIR2_DATA 0xc0300248
1015#define ixTHM_TMON2_RDIR3_DATA 0xc030024c
1016#define ixTHM_TMON2_RDIR4_DATA 0xc0300250
1017#define ixTHM_TMON2_RDIR5_DATA 0xc0300254
1018#define ixTHM_TMON2_RDIR6_DATA 0xc0300258
1019#define ixTHM_TMON2_RDIR7_DATA 0xc030025c
1020#define ixTHM_TMON2_RDIR8_DATA 0xc0300260
1021#define ixTHM_TMON2_RDIR9_DATA 0xc0300264
1022#define ixTHM_TMON2_RDIR10_DATA 0xc0300268
1023#define ixTHM_TMON2_RDIR11_DATA 0xc030026c
1024#define ixTHM_TMON2_RDIR12_DATA 0xc0300270
1025#define ixTHM_TMON2_RDIR13_DATA 0xc0300274
1026#define ixTHM_TMON2_RDIR14_DATA 0xc0300278
1027#define ixTHM_TMON2_RDIR15_DATA 0xc030027c
1028#define ixTHM_TMON0_INT_DATA 0xc0300300
1029#define ixTHM_TMON1_INT_DATA 0xc0300304
1030#define ixTHM_TMON2_INT_DATA 0xc0300308
1031#define ixTHM_TMON0_DEBUG 0xc0300310
1032#define ixTHM_TMON1_DEBUG 0xc0300314
1033#define ixTHM_TMON2_DEBUG 0xc0300318
1034#define ixTHM_TMON0_STATUS 0xc0300320
1035#define ixTHM_TMON1_STATUS 0xc0300324
1036#define ixTHM_TMON2_STATUS 0xc0300328
1037#define ixGENERAL_PWRMGT 0xc0200000
1038#define ixCNB_PWRMGT_CNTL 0xc0200004
1039#define ixSCLK_PWRMGT_CNTL 0xc0200008
1040#define ixTARGET_AND_CURRENT_PROFILE_INDEX 0xc0200014
1041#define ixPWR_PCC_CONTROL 0xc0200018
1042#define ixPWR_PCC_GPIO_SELECT 0xc020001c
1043#define ixCG_FREQ_TRAN_VOTING_0 0xc02001a8
1044#define ixCG_FREQ_TRAN_VOTING_1 0xc02001ac
1045#define ixCG_FREQ_TRAN_VOTING_2 0xc02001b0
1046#define ixCG_FREQ_TRAN_VOTING_3 0xc02001b4
1047#define ixCG_FREQ_TRAN_VOTING_4 0xc02001b8
1048#define ixCG_FREQ_TRAN_VOTING_5 0xc02001bc
1049#define ixCG_FREQ_TRAN_VOTING_6 0xc02001c0
1050#define ixCG_FREQ_TRAN_VOTING_7 0xc02001c4
1051#define ixPLL_TEST_CNTL 0xc020003c
1052#define ixCG_STATIC_SCREEN_PARAMETER 0xc0200044
1053#define ixCG_DISPLAY_GAP_CNTL 0xc0200060
1054#define ixCG_DISPLAY_GAP_CNTL2 0xc0200230
1055#define ixCG_ACPI_CNTL 0xc0200064
1056#define ixSCLK_DEEP_SLEEP_CNTL 0xc0200080
1057#define ixSCLK_DEEP_SLEEP_CNTL2 0xc0200084
1058#define ixSCLK_DEEP_SLEEP_CNTL3 0xc020009c
1059#define ixSCLK_DEEP_SLEEP_MISC_CNTL 0xc0200088
1060#define ixLCLK_DEEP_SLEEP_CNTL 0xc020008c
1061#define ixLCLK_DEEP_SLEEP_CNTL2 0xc0200310
1062#define ixTARGET_AND_CURRENT_PROFILE_INDEX_1 0xc02000f0
1063#define ixCG_ULV_PARAMETER 0xc020015c
1064#define ixSCLK_MIN_DIV 0xc02003ac
1065#define ixPWR_AVFS_SEL 0xc0200384
1066#define ixPWR_AVFS_CNTL 0xc0200388
1067#define ixPWR_AVFS0_CNTL_STATUS 0xc0200400
1068#define ixPWR_AVFS1_CNTL_STATUS 0xc0200404
1069#define ixPWR_AVFS2_CNTL_STATUS 0xc0200408
1070#define ixPWR_AVFS3_CNTL_STATUS 0xc020040c
1071#define ixPWR_AVFS4_CNTL_STATUS 0xc0200410
1072#define ixPWR_AVFS5_CNTL_STATUS 0xc0200414
1073#define ixPWR_AVFS6_CNTL_STATUS 0xc0200418
1074#define ixPWR_AVFS7_CNTL_STATUS 0xc020041c
1075#define ixPWR_AVFS8_CNTL_STATUS 0xc0200420
1076#define ixPWR_AVFS9_CNTL_STATUS 0xc0200424
1077#define ixPWR_AVFS10_CNTL_STATUS 0xc0200428
1078#define ixPWR_AVFS11_CNTL_STATUS 0xc020042c
1079#define ixPWR_AVFS12_CNTL_STATUS 0xc0200430
1080#define ixPWR_AVFS13_CNTL_STATUS 0xc0200434
1081#define ixPWR_AVFS14_CNTL_STATUS 0xc0200438
1082#define ixPWR_AVFS15_CNTL_STATUS 0xc020043c
1083#define ixPWR_AVFS16_CNTL_STATUS 0xc0200440
1084#define ixPWR_AVFS17_CNTL_STATUS 0xc0200444
1085#define ixPWR_AVFS18_CNTL_STATUS 0xc0200448
1086#define ixPWR_AVFS19_CNTL_STATUS 0xc020044c
1087#define ixPWR_AVFS20_CNTL_STATUS 0xc0200450
1088#define ixPWR_AVFS21_CNTL_STATUS 0xc0200454
1089#define ixPWR_AVFS22_CNTL_STATUS 0xc0200458
1090#define ixPWR_AVFS23_CNTL_STATUS 0xc020045c
1091#define ixPWR_AVFS24_CNTL_STATUS 0xc0200460
1092#define ixPWR_AVFS25_CNTL_STATUS 0xc0200464
1093#define ixPWR_AVFS26_CNTL_STATUS 0xc0200468
1094#define ixPWR_AVFS27_CNTL_STATUS 0xc020046c
1095#define ixPWR_CKS_ENABLE 0xc020034c
1096#define ixPWR_CKS_CNTL 0xc0200350
1097#define ixPWR_DISP_TIMER_CONTROL 0xc02003c0
1098#define ixPWR_DISP_TIMER_DEBUG 0xc02003c4
1099#define ixPWR_DISP_TIMER2_CONTROL 0xc02003c8
1100#define ixPWR_DISP_TIMER2_DEBUG 0xc02003cc
1101#define ixPWR_DISP_TIMER_CONTROL2 0xc0200378
1102#define ixVDDGFX_IDLE_PARAMETER 0xc020036c
1103#define ixVDDGFX_IDLE_CONTROL 0xc0200370
1104#define ixVDDGFX_IDLE_EXIT 0xc0200374
1105#define ixLCAC_MC0_CNTL 0xc0400130
1106#define ixLCAC_MC0_OVR_SEL 0xc0400134
1107#define ixLCAC_MC0_OVR_VAL 0xc0400138
1108#define ixLCAC_MC1_CNTL 0xc040013c
1109#define ixLCAC_MC1_OVR_SEL 0xc0400140
1110#define ixLCAC_MC1_OVR_VAL 0xc0400144
1111#define ixLCAC_MC2_CNTL 0xc0400148
1112#define ixLCAC_MC2_OVR_SEL 0xc040014c
1113#define ixLCAC_MC2_OVR_VAL 0xc0400150
1114#define ixLCAC_MC3_CNTL 0xc0400154
1115#define ixLCAC_MC3_OVR_SEL 0xc0400158
1116#define ixLCAC_MC3_OVR_VAL 0xc040015c
1117#define ixLCAC_MC4_CNTL 0xc0400d60
1118#define ixLCAC_MC4_OVR_SEL 0xc0400d64
1119#define ixLCAC_MC4_OVR_VAL 0xc0400d68
1120#define ixLCAC_MC5_CNTL 0xc0400d6c
1121#define ixLCAC_MC5_OVR_SEL 0xc0400d70
1122#define ixLCAC_MC5_OVR_VAL 0xc0400d74
1123#define ixLCAC_MC6_CNTL 0xc0400d78
1124#define ixLCAC_MC6_OVR_SEL 0xc0400d7c
1125#define ixLCAC_MC6_OVR_VAL 0xc0400d80
1126#define ixLCAC_MC7_CNTL 0xc0400d84
1127#define ixLCAC_MC7_OVR_SEL 0xc0400d88
1128#define ixLCAC_MC7_OVR_VAL 0xc0400d8c
1129#define ixLCAC_CPL_CNTL 0xc0400160
1130#define ixLCAC_CPL_OVR_SEL 0xc0400164
1131#define ixLCAC_CPL_OVR_VAL 0xc0400168
1132#define mmROM_SMC_IND_INDEX 0x80
1133#define mmROM0_ROM_SMC_IND_INDEX 0x80
1134#define mmROM1_ROM_SMC_IND_INDEX 0x82
1135#define mmROM2_ROM_SMC_IND_INDEX 0x84
1136#define mmROM3_ROM_SMC_IND_INDEX 0x86
1137#define mmROM_SMC_IND_DATA 0x81
1138#define mmROM0_ROM_SMC_IND_DATA 0x81
1139#define mmROM1_ROM_SMC_IND_DATA 0x83
1140#define mmROM2_ROM_SMC_IND_DATA 0x85
1141#define mmROM3_ROM_SMC_IND_DATA 0x87
1142#define ixROM_CNTL 0xc0600000
1143#define ixPAGE_MIRROR_CNTL 0xc0600004
1144#define ixROM_STATUS 0xc0600008
1145#define ixCGTT_ROM_CLK_CTRL0 0xc060000c
1146#define ixROM_INDEX 0xc0600010
1147#define ixROM_DATA 0xc0600014
1148#define ixROM_START 0xc0600018
1149#define ixROM_SW_CNTL 0xc060001c
1150#define ixROM_SW_STATUS 0xc0600020
1151#define ixROM_SW_COMMAND 0xc0600024
1152#define ixROM_SW_DATA_1 0xc0600028
1153#define ixROM_SW_DATA_2 0xc060002c
1154#define ixROM_SW_DATA_3 0xc0600030
1155#define ixROM_SW_DATA_4 0xc0600034
1156#define ixROM_SW_DATA_5 0xc0600038
1157#define ixROM_SW_DATA_6 0xc060003c
1158#define ixROM_SW_DATA_7 0xc0600040
1159#define ixROM_SW_DATA_8 0xc0600044
1160#define ixROM_SW_DATA_9 0xc0600048
1161#define ixROM_SW_DATA_10 0xc060004c
1162#define ixROM_SW_DATA_11 0xc0600050
1163#define ixROM_SW_DATA_12 0xc0600054
1164#define ixROM_SW_DATA_13 0xc0600058
1165#define ixROM_SW_DATA_14 0xc060005c
1166#define ixROM_SW_DATA_15 0xc0600060
1167#define ixROM_SW_DATA_16 0xc0600064
1168#define ixROM_SW_DATA_17 0xc0600068
1169#define ixROM_SW_DATA_18 0xc060006c
1170#define ixROM_SW_DATA_19 0xc0600070
1171#define ixROM_SW_DATA_20 0xc0600074
1172#define ixROM_SW_DATA_21 0xc0600078
1173#define ixROM_SW_DATA_22 0xc060007c
1174#define ixROM_SW_DATA_23 0xc0600080
1175#define ixROM_SW_DATA_24 0xc0600084
1176#define ixROM_SW_DATA_25 0xc0600088
1177#define ixROM_SW_DATA_26 0xc060008c
1178#define ixROM_SW_DATA_27 0xc0600090
1179#define ixROM_SW_DATA_28 0xc0600094
1180#define ixROM_SW_DATA_29 0xc0600098
1181#define ixROM_SW_DATA_30 0xc060009c
1182#define ixROM_SW_DATA_31 0xc06000a0
1183#define ixROM_SW_DATA_32 0xc06000a4
1184#define ixROM_SW_DATA_33 0xc06000a8
1185#define ixROM_SW_DATA_34 0xc06000ac
1186#define ixROM_SW_DATA_35 0xc06000b0
1187#define ixROM_SW_DATA_36 0xc06000b4
1188#define ixROM_SW_DATA_37 0xc06000b8
1189#define ixROM_SW_DATA_38 0xc06000bc
1190#define ixROM_SW_DATA_39 0xc06000c0
1191#define ixROM_SW_DATA_40 0xc06000c4
1192#define ixROM_SW_DATA_41 0xc06000c8
1193#define ixROM_SW_DATA_42 0xc06000cc
1194#define ixROM_SW_DATA_43 0xc06000d0
1195#define ixROM_SW_DATA_44 0xc06000d4
1196#define ixROM_SW_DATA_45 0xc06000d8
1197#define ixROM_SW_DATA_46 0xc06000dc
1198#define ixROM_SW_DATA_47 0xc06000e0
1199#define ixROM_SW_DATA_48 0xc06000e4
1200#define ixROM_SW_DATA_49 0xc06000e8
1201#define ixROM_SW_DATA_50 0xc06000ec
1202#define ixROM_SW_DATA_51 0xc06000f0
1203#define ixROM_SW_DATA_52 0xc06000f4
1204#define ixROM_SW_DATA_53 0xc06000f8
1205#define ixROM_SW_DATA_54 0xc06000fc
1206#define ixROM_SW_DATA_55 0xc0600100
1207#define ixROM_SW_DATA_56 0xc0600104
1208#define ixROM_SW_DATA_57 0xc0600108
1209#define ixROM_SW_DATA_58 0xc060010c
1210#define ixROM_SW_DATA_59 0xc0600110
1211#define ixROM_SW_DATA_60 0xc0600114
1212#define ixROM_SW_DATA_61 0xc0600118
1213#define ixROM_SW_DATA_62 0xc060011c
1214#define ixROM_SW_DATA_63 0xc0600120
1215#define ixROM_SW_DATA_64 0xc0600124
1216#define mmGC_CAC_CGTT_CLK_CTRL 0x3292
1217#define mmSE_CAC_CGTT_CLK_CTRL 0x3293
1218#define mmGC_CAC_LKG_AGGR_LOWER 0x3296
1219#define mmGC_CAC_LKG_AGGR_UPPER 0x3297
1220#define ixGC_CAC_WEIGHT_CU_0 0x32
1221#define ixGC_CAC_WEIGHT_CU_1 0x33
1222#define ixGC_CAC_WEIGHT_CU_2 0x34
1223#define ixGC_CAC_WEIGHT_CU_3 0x35
1224#define ixGC_CAC_WEIGHT_CU_4 0x36
1225#define ixGC_CAC_WEIGHT_CU_5 0x37
1226#define ixGC_CAC_WEIGHT_CU_6 0x38
1227#define ixGC_CAC_WEIGHT_CU_7 0x39
1228#define ixGC_CAC_ACC_CU0 0xba
1229#define ixGC_CAC_ACC_CU1 0xbb
1230#define ixGC_CAC_ACC_CU2 0xbc
1231#define ixGC_CAC_ACC_CU3 0xbd
1232#define ixGC_CAC_ACC_CU4 0xbe
1233#define ixGC_CAC_ACC_CU5 0xbf
1234#define ixGC_CAC_ACC_CU6 0xc0
1235#define ixGC_CAC_ACC_CU7 0xc1
1236#define ixGC_CAC_ACC_CU8 0xc2
1237#define ixGC_CAC_ACC_CU9 0xc3
1238#define ixGC_CAC_ACC_CU10 0xc4
1239#define ixGC_CAC_ACC_CU11 0xc5
1240#define ixGC_CAC_ACC_CU12 0xc6
1241#define ixGC_CAC_ACC_CU13 0xc7
1242#define ixGC_CAC_ACC_CU14 0xc8
1243#define ixGC_CAC_ACC_CU15 0xc9
1244#define ixGC_CAC_OVRD_CU 0xe7
1245
1246#endif /* SMU_7_1_3_D_H */
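
The ix-prefixed registers in this file live in the SMC's indirect address space and are reached through a memory-mapped index/data pair (for the ixROM_* block above, mmROM_SMC_IND_INDEX / mmROM_SMC_IND_DATA) rather than by direct MMIO. A minimal sketch of that access pattern, assuming hypothetical wreg32()/rreg32() MMIO helpers that are not part of this patch:

static uint32_t rom_smc_ind_read(uint32_t ix_reg)
{
	wreg32(mmROM_SMC_IND_INDEX, ix_reg);	/* select the indirect address */
	return rreg32(mmROM_SMC_IND_DATA);	/* read the selected register */
}

A caller would pass one of the ix* constants above, e.g. rom_smc_ind_read(ixROM_STATUS).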
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h
new file mode 100644
index 000000000000..f19c4208d963
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h
@@ -0,0 +1,1282 @@
1/*
2 * SMU_7_1_3 Register documentation
3 *
4 * Copyright (C) 2014 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
20 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef SMU_7_1_3_ENUM_H
25#define SMU_7_1_3_ENUM_H
26
27#define CG_SRBM_START_ADDR 0x600
28#define CG_SRBM_END_ADDR 0x8ff
29#define RCU_CCF_DWORDS0 0xa0
30#define RCU_CCF_BITS0 0x1400
31#define RCU_SAM_BYTES 0x2c
32#define RCU_SAM_RTL_BYTES 0x2c
33#define RCU_SMU_BYTES 0x14
34#define RCU_SMU_RTL_BYTES 0x14
35#define SFP_CHAIN_ADDR 0x1
36#define SFP_SADR 0x0
37#define SFP_EADR 0x37f
38#define SAMU_KEY_CHAIN_ADR 0x0
39#define SAMU_KEY_SADR 0x280
40#define SAMU_KEY_EADR 0x2ab
41#define SMU_KEY_CHAIN_ADR 0x0
42#define SMU_KEY_SADR 0x2ac
43#define SMU_KEY_EADR 0x2bf
44#define SMC_MSG_TEST 0x1
45#define SMC_MSG_PHY_LN_OFF 0x2
46#define SMC_MSG_PHY_LN_ON 0x3
47#define SMC_MSG_DDI_PHY_OFF 0x4
48#define SMC_MSG_DDI_PHY_ON 0x5
49#define SMC_MSG_CASCADE_PLL_OFF 0x6
50#define SMC_MSG_CASCADE_PLL_ON 0x7
51#define SMC_MSG_PWR_OFF_x16 0x8
52#define SMC_MSG_CONFIG_LCLK_DPM 0x9
53#define SMC_MSG_FLUSH_DATA_CACHE 0xa
54#define SMC_MSG_FLUSH_INSTRUCTION_CACHE 0xb
55#define SMC_MSG_CONFIG_VPC_ACCUMULATOR 0xc
56#define SMC_MSG_CONFIG_BAPM 0xd
57#define SMC_MSG_CONFIG_TDC_LIMIT 0xe
58#define SMC_MSG_CONFIG_LPMx 0xf
59#define SMC_MSG_CONFIG_HTC_LIMIT 0x10
60#define SMC_MSG_CONFIG_THERMAL_CNTL 0x11
61#define SMC_MSG_CONFIG_VOLTAGE_CNTL 0x12
62#define SMC_MSG_CONFIG_TDP_CNTL 0x13
63#define SMC_MSG_EN_PM_CNTL 0x14
64#define SMC_MSG_DIS_PM_CNTL 0x15
65#define SMC_MSG_CONFIG_NBDPM 0x16
66#define SMC_MSG_CONFIG_LOADLINE 0x17
67#define SMC_MSG_ADJUST_LOADLINE 0x18
68#define SMC_MSG_RESET 0x20
69#define SMC_MSG_VOLTAGE 0x25
70#define SMC_VERSION_MAJOR 0x7
71#define SMC_VERSION_MINOR 0x0
72#define SMC_HEADER_SIZE 0x40
73#define ROM_SIGNATURE 0xaa55
74typedef enum SurfaceEndian {
75 ENDIAN_NONE = 0x0,
76 ENDIAN_8IN16 = 0x1,
77 ENDIAN_8IN32 = 0x2,
78 ENDIAN_8IN64 = 0x3,
79} SurfaceEndian;
80typedef enum ArrayMode {
81 ARRAY_LINEAR_GENERAL = 0x0,
82 ARRAY_LINEAR_ALIGNED = 0x1,
83 ARRAY_1D_TILED_THIN1 = 0x2,
84 ARRAY_1D_TILED_THICK = 0x3,
85 ARRAY_2D_TILED_THIN1 = 0x4,
86 ARRAY_PRT_TILED_THIN1 = 0x5,
87 ARRAY_PRT_2D_TILED_THIN1 = 0x6,
88 ARRAY_2D_TILED_THICK = 0x7,
89 ARRAY_2D_TILED_XTHICK = 0x8,
90 ARRAY_PRT_TILED_THICK = 0x9,
91 ARRAY_PRT_2D_TILED_THICK = 0xa,
92 ARRAY_PRT_3D_TILED_THIN1 = 0xb,
93 ARRAY_3D_TILED_THIN1 = 0xc,
94 ARRAY_3D_TILED_THICK = 0xd,
95 ARRAY_3D_TILED_XTHICK = 0xe,
96 ARRAY_PRT_3D_TILED_THICK = 0xf,
97} ArrayMode;
98typedef enum PipeTiling {
99 CONFIG_1_PIPE = 0x0,
100 CONFIG_2_PIPE = 0x1,
101 CONFIG_4_PIPE = 0x2,
102 CONFIG_8_PIPE = 0x3,
103} PipeTiling;
104typedef enum BankTiling {
105 CONFIG_4_BANK = 0x0,
106 CONFIG_8_BANK = 0x1,
107} BankTiling;
108typedef enum GroupInterleave {
109 CONFIG_256B_GROUP = 0x0,
110 CONFIG_512B_GROUP = 0x1,
111} GroupInterleave;
112typedef enum RowTiling {
113 CONFIG_1KB_ROW = 0x0,
114 CONFIG_2KB_ROW = 0x1,
115 CONFIG_4KB_ROW = 0x2,
116 CONFIG_8KB_ROW = 0x3,
117 CONFIG_1KB_ROW_OPT = 0x4,
118 CONFIG_2KB_ROW_OPT = 0x5,
119 CONFIG_4KB_ROW_OPT = 0x6,
120 CONFIG_8KB_ROW_OPT = 0x7,
121} RowTiling;
122typedef enum BankSwapBytes {
123 CONFIG_128B_SWAPS = 0x0,
124 CONFIG_256B_SWAPS = 0x1,
125 CONFIG_512B_SWAPS = 0x2,
126 CONFIG_1KB_SWAPS = 0x3,
127} BankSwapBytes;
128typedef enum SampleSplitBytes {
129 CONFIG_1KB_SPLIT = 0x0,
130 CONFIG_2KB_SPLIT = 0x1,
131 CONFIG_4KB_SPLIT = 0x2,
132 CONFIG_8KB_SPLIT = 0x3,
133} SampleSplitBytes;
134typedef enum NumPipes {
135 ADDR_CONFIG_1_PIPE = 0x0,
136 ADDR_CONFIG_2_PIPE = 0x1,
137 ADDR_CONFIG_4_PIPE = 0x2,
138 ADDR_CONFIG_8_PIPE = 0x3,
139} NumPipes;
140typedef enum PipeInterleaveSize {
141 ADDR_CONFIG_PIPE_INTERLEAVE_256B = 0x0,
142 ADDR_CONFIG_PIPE_INTERLEAVE_512B = 0x1,
143} PipeInterleaveSize;
144typedef enum BankInterleaveSize {
145 ADDR_CONFIG_BANK_INTERLEAVE_1 = 0x0,
146 ADDR_CONFIG_BANK_INTERLEAVE_2 = 0x1,
147 ADDR_CONFIG_BANK_INTERLEAVE_4 = 0x2,
148 ADDR_CONFIG_BANK_INTERLEAVE_8 = 0x3,
149} BankInterleaveSize;
150typedef enum NumShaderEngines {
151 ADDR_CONFIG_1_SHADER_ENGINE = 0x0,
152 ADDR_CONFIG_2_SHADER_ENGINE = 0x1,
153} NumShaderEngines;
154typedef enum ShaderEngineTileSize {
155 ADDR_CONFIG_SE_TILE_16 = 0x0,
156 ADDR_CONFIG_SE_TILE_32 = 0x1,
157} ShaderEngineTileSize;
158typedef enum NumGPUs {
159 ADDR_CONFIG_1_GPU = 0x0,
160 ADDR_CONFIG_2_GPU = 0x1,
161 ADDR_CONFIG_4_GPU = 0x2,
162} NumGPUs;
163typedef enum MultiGPUTileSize {
164 ADDR_CONFIG_GPU_TILE_16 = 0x0,
165 ADDR_CONFIG_GPU_TILE_32 = 0x1,
166 ADDR_CONFIG_GPU_TILE_64 = 0x2,
167 ADDR_CONFIG_GPU_TILE_128 = 0x3,
168} MultiGPUTileSize;
169typedef enum RowSize {
170 ADDR_CONFIG_1KB_ROW = 0x0,
171 ADDR_CONFIG_2KB_ROW = 0x1,
172 ADDR_CONFIG_4KB_ROW = 0x2,
173} RowSize;
174typedef enum NumLowerPipes {
175 ADDR_CONFIG_1_LOWER_PIPES = 0x0,
176 ADDR_CONFIG_2_LOWER_PIPES = 0x1,
177} NumLowerPipes;
178typedef enum DebugBlockId {
179 DBG_CLIENT_BLKID_RESERVED = 0x0,
180 DBG_CLIENT_BLKID_dbg = 0x1,
181 DBG_CLIENT_BLKID_scf2 = 0x2,
182 DBG_CLIENT_BLKID_mcd5_0 = 0x3,
183 DBG_CLIENT_BLKID_mcd5_1 = 0x4,
184 DBG_CLIENT_BLKID_mcd6_0 = 0x5,
185 DBG_CLIENT_BLKID_mcd6_1 = 0x6,
186 DBG_CLIENT_BLKID_mcd7_0 = 0x7,
187 DBG_CLIENT_BLKID_mcd7_1 = 0x8,
188 DBG_CLIENT_BLKID_vmc = 0x9,
189 DBG_CLIENT_BLKID_sx30 = 0xa,
190 DBG_CLIENT_BLKID_mcd2_0 = 0xb,
191 DBG_CLIENT_BLKID_mcd2_1 = 0xc,
192 DBG_CLIENT_BLKID_bci1 = 0xd,
193 DBG_CLIENT_BLKID_xdma_dbg_client_wrapper = 0xe,
194 DBG_CLIENT_BLKID_mcc0 = 0xf,
195 DBG_CLIENT_BLKID_uvdf_0 = 0x10,
196 DBG_CLIENT_BLKID_uvdf_1 = 0x11,
197 DBG_CLIENT_BLKID_uvdf_2 = 0x12,
198 DBG_CLIENT_BLKID_bci0 = 0x13,
199 DBG_CLIENT_BLKID_vcec0_0 = 0x14,
200 DBG_CLIENT_BLKID_cb100 = 0x15,
201 DBG_CLIENT_BLKID_cb001 = 0x16,
202 DBG_CLIENT_BLKID_cb002 = 0x17,
203 DBG_CLIENT_BLKID_cb003 = 0x18,
204 DBG_CLIENT_BLKID_mcd4_0 = 0x19,
205 DBG_CLIENT_BLKID_mcd4_1 = 0x1a,
206 DBG_CLIENT_BLKID_tmonw00 = 0x1b,
207 DBG_CLIENT_BLKID_cb101 = 0x1c,
208 DBG_CLIENT_BLKID_cb102 = 0x1d,
209 DBG_CLIENT_BLKID_cb103 = 0x1e,
210 DBG_CLIENT_BLKID_sx10 = 0x1f,
211 DBG_CLIENT_BLKID_cb301 = 0x20,
212 DBG_CLIENT_BLKID_cb302 = 0x21,
213 DBG_CLIENT_BLKID_cb303 = 0x22,
214 DBG_CLIENT_BLKID_tmonw01 = 0x23,
215 DBG_CLIENT_BLKID_tmonw02 = 0x24,
216 DBG_CLIENT_BLKID_vcea0_0 = 0x25,
217 DBG_CLIENT_BLKID_vcea0_1 = 0x26,
218 DBG_CLIENT_BLKID_vcea0_2 = 0x27,
219 DBG_CLIENT_BLKID_vcea0_3 = 0x28,
220 DBG_CLIENT_BLKID_scf1 = 0x29,
221 DBG_CLIENT_BLKID_sx20 = 0x2a,
222 DBG_CLIENT_BLKID_spim1 = 0x2b,
223 DBG_CLIENT_BLKID_scb1 = 0x2c,
224 DBG_CLIENT_BLKID_pa10 = 0x2d,
225 DBG_CLIENT_BLKID_pa00 = 0x2e,
226 DBG_CLIENT_BLKID_gmcon = 0x2f,
227 DBG_CLIENT_BLKID_mcb = 0x30,
228 DBG_CLIENT_BLKID_vgt0 = 0x31,
229 DBG_CLIENT_BLKID_pc0 = 0x32,
230 DBG_CLIENT_BLKID_bci2 = 0x33,
231 DBG_CLIENT_BLKID_uvdb_0 = 0x34,
232 DBG_CLIENT_BLKID_spim3 = 0x35,
233 DBG_CLIENT_BLKID_scb3 = 0x36,
234 DBG_CLIENT_BLKID_cpc_0 = 0x37,
235 DBG_CLIENT_BLKID_cpc_1 = 0x38,
236 DBG_CLIENT_BLKID_uvdm_0 = 0x39,
237 DBG_CLIENT_BLKID_uvdm_1 = 0x3a,
238 DBG_CLIENT_BLKID_uvdm_2 = 0x3b,
239 DBG_CLIENT_BLKID_uvdm_3 = 0x3c,
240 DBG_CLIENT_BLKID_cb000 = 0x3d,
241 DBG_CLIENT_BLKID_spim0 = 0x3e,
242 DBG_CLIENT_BLKID_scb0 = 0x3f,
243 DBG_CLIENT_BLKID_mcc2 = 0x40,
244 DBG_CLIENT_BLKID_ds0 = 0x41,
245 DBG_CLIENT_BLKID_srbm = 0x42,
246 DBG_CLIENT_BLKID_ih = 0x43,
247 DBG_CLIENT_BLKID_sem = 0x44,
248 DBG_CLIENT_BLKID_sdma_0 = 0x45,
249 DBG_CLIENT_BLKID_sdma_1 = 0x46,
250 DBG_CLIENT_BLKID_hdp = 0x47,
251 DBG_CLIENT_BLKID_acp_0 = 0x48,
252 DBG_CLIENT_BLKID_acp_1 = 0x49,
253 DBG_CLIENT_BLKID_cb200 = 0x4a,
254 DBG_CLIENT_BLKID_scf3 = 0x4b,
255 DBG_CLIENT_BLKID_bci3 = 0x4c,
256 DBG_CLIENT_BLKID_mcd0_0 = 0x4d,
257 DBG_CLIENT_BLKID_mcd0_1 = 0x4e,
258 DBG_CLIENT_BLKID_pa11 = 0x4f,
259 DBG_CLIENT_BLKID_pa01 = 0x50,
260 DBG_CLIENT_BLKID_cb201 = 0x51,
261 DBG_CLIENT_BLKID_cb202 = 0x52,
262 DBG_CLIENT_BLKID_cb203 = 0x53,
263 DBG_CLIENT_BLKID_spim2 = 0x54,
264 DBG_CLIENT_BLKID_scb2 = 0x55,
265 DBG_CLIENT_BLKID_vgt2 = 0x56,
266 DBG_CLIENT_BLKID_pc2 = 0x57,
267 DBG_CLIENT_BLKID_smu_0 = 0x58,
268 DBG_CLIENT_BLKID_smu_1 = 0x59,
269 DBG_CLIENT_BLKID_smu_2 = 0x5a,
270 DBG_CLIENT_BLKID_cb1 = 0x5b,
271 DBG_CLIENT_BLKID_ia0 = 0x5c,
272 DBG_CLIENT_BLKID_wd = 0x5d,
273 DBG_CLIENT_BLKID_ia1 = 0x5e,
274 DBG_CLIENT_BLKID_scf0 = 0x5f,
275 DBG_CLIENT_BLKID_vgt1 = 0x60,
276 DBG_CLIENT_BLKID_pc1 = 0x61,
277 DBG_CLIENT_BLKID_cb0 = 0x62,
278 DBG_CLIENT_BLKID_gdc_one_0 = 0x63,
279 DBG_CLIENT_BLKID_gdc_one_1 = 0x64,
280 DBG_CLIENT_BLKID_gdc_one_2 = 0x65,
281 DBG_CLIENT_BLKID_gdc_one_3 = 0x66,
282 DBG_CLIENT_BLKID_gdc_one_4 = 0x67,
283 DBG_CLIENT_BLKID_gdc_one_5 = 0x68,
284 DBG_CLIENT_BLKID_gdc_one_6 = 0x69,
285 DBG_CLIENT_BLKID_gdc_one_7 = 0x6a,
286 DBG_CLIENT_BLKID_gdc_one_8 = 0x6b,
287 DBG_CLIENT_BLKID_gdc_one_9 = 0x6c,
288 DBG_CLIENT_BLKID_gdc_one_10 = 0x6d,
289 DBG_CLIENT_BLKID_gdc_one_11 = 0x6e,
290 DBG_CLIENT_BLKID_gdc_one_12 = 0x6f,
291 DBG_CLIENT_BLKID_gdc_one_13 = 0x70,
292 DBG_CLIENT_BLKID_gdc_one_14 = 0x71,
293 DBG_CLIENT_BLKID_gdc_one_15 = 0x72,
294 DBG_CLIENT_BLKID_gdc_one_16 = 0x73,
295 DBG_CLIENT_BLKID_gdc_one_17 = 0x74,
296 DBG_CLIENT_BLKID_gdc_one_18 = 0x75,
297 DBG_CLIENT_BLKID_gdc_one_19 = 0x76,
298 DBG_CLIENT_BLKID_gdc_one_20 = 0x77,
299 DBG_CLIENT_BLKID_gdc_one_21 = 0x78,
300 DBG_CLIENT_BLKID_gdc_one_22 = 0x79,
301 DBG_CLIENT_BLKID_gdc_one_23 = 0x7a,
302 DBG_CLIENT_BLKID_gdc_one_24 = 0x7b,
303 DBG_CLIENT_BLKID_gdc_one_25 = 0x7c,
304 DBG_CLIENT_BLKID_gdc_one_26 = 0x7d,
305 DBG_CLIENT_BLKID_gdc_one_27 = 0x7e,
306 DBG_CLIENT_BLKID_gdc_one_28 = 0x7f,
307 DBG_CLIENT_BLKID_gdc_one_29 = 0x80,
308 DBG_CLIENT_BLKID_gdc_one_30 = 0x81,
309 DBG_CLIENT_BLKID_gdc_one_31 = 0x82,
310 DBG_CLIENT_BLKID_gdc_one_32 = 0x83,
311 DBG_CLIENT_BLKID_gdc_one_33 = 0x84,
312 DBG_CLIENT_BLKID_gdc_one_34 = 0x85,
313 DBG_CLIENT_BLKID_gdc_one_35 = 0x86,
314 DBG_CLIENT_BLKID_vceb0_0 = 0x87,
315 DBG_CLIENT_BLKID_vgt3 = 0x88,
316 DBG_CLIENT_BLKID_pc3 = 0x89,
317 DBG_CLIENT_BLKID_mcd3_0 = 0x8a,
318 DBG_CLIENT_BLKID_mcd3_1 = 0x8b,
319 DBG_CLIENT_BLKID_uvdu_0 = 0x8c,
320 DBG_CLIENT_BLKID_uvdu_1 = 0x8d,
321 DBG_CLIENT_BLKID_uvdu_2 = 0x8e,
322 DBG_CLIENT_BLKID_uvdu_3 = 0x8f,
323 DBG_CLIENT_BLKID_uvdu_4 = 0x90,
324 DBG_CLIENT_BLKID_uvdu_5 = 0x91,
325 DBG_CLIENT_BLKID_uvdu_6 = 0x92,
326 DBG_CLIENT_BLKID_cb300 = 0x93,
327 DBG_CLIENT_BLKID_mcd1_0 = 0x94,
328 DBG_CLIENT_BLKID_mcd1_1 = 0x95,
329 DBG_CLIENT_BLKID_sx00 = 0x96,
330 DBG_CLIENT_BLKID_uvdc_0 = 0x97,
331 DBG_CLIENT_BLKID_uvdc_1 = 0x98,
332 DBG_CLIENT_BLKID_mcc3 = 0x99,
333 DBG_CLIENT_BLKID_mcc4 = 0x9a,
334 DBG_CLIENT_BLKID_mcc5 = 0x9b,
335 DBG_CLIENT_BLKID_mcc6 = 0x9c,
336 DBG_CLIENT_BLKID_mcc7 = 0x9d,
337 DBG_CLIENT_BLKID_cpg_0 = 0x9e,
338 DBG_CLIENT_BLKID_cpg_1 = 0x9f,
339 DBG_CLIENT_BLKID_gck = 0xa0,
340 DBG_CLIENT_BLKID_mcc1 = 0xa1,
341 DBG_CLIENT_BLKID_cpf_0 = 0xa2,
342 DBG_CLIENT_BLKID_cpf_1 = 0xa3,
343 DBG_CLIENT_BLKID_rlc = 0xa4,
344 DBG_CLIENT_BLKID_grbm = 0xa5,
345 DBG_CLIENT_BLKID_sammsp = 0xa6,
346 DBG_CLIENT_BLKID_dci_pg = 0xa7,
347 DBG_CLIENT_BLKID_dci_0 = 0xa8,
348 DBG_CLIENT_BLKID_dccg0_0 = 0xa9,
349 DBG_CLIENT_BLKID_dccg0_1 = 0xaa,
350 DBG_CLIENT_BLKID_dcfe01_0 = 0xab,
351 DBG_CLIENT_BLKID_dcfe02_0 = 0xac,
352 DBG_CLIENT_BLKID_dcfe03_0 = 0xad,
353 DBG_CLIENT_BLKID_dcfe04_0 = 0xae,
354 DBG_CLIENT_BLKID_dcfe05_0 = 0xaf,
355 DBG_CLIENT_BLKID_dcfe06_0 = 0xb0,
356 DBG_CLIENT_BLKID_mcq0_0 = 0xb1,
357 DBG_CLIENT_BLKID_mcq0_1 = 0xb2,
358 DBG_CLIENT_BLKID_mcq1_0 = 0xb3,
359 DBG_CLIENT_BLKID_mcq1_1 = 0xb4,
360 DBG_CLIENT_BLKID_mcq2_0 = 0xb5,
361 DBG_CLIENT_BLKID_mcq2_1 = 0xb6,
362 DBG_CLIENT_BLKID_mcq3_0 = 0xb7,
363 DBG_CLIENT_BLKID_mcq3_1 = 0xb8,
364 DBG_CLIENT_BLKID_mcq4_0 = 0xb9,
365 DBG_CLIENT_BLKID_mcq4_1 = 0xba,
366 DBG_CLIENT_BLKID_mcq5_0 = 0xbb,
367 DBG_CLIENT_BLKID_mcq5_1 = 0xbc,
368 DBG_CLIENT_BLKID_mcq6_0 = 0xbd,
369 DBG_CLIENT_BLKID_mcq6_1 = 0xbe,
370 DBG_CLIENT_BLKID_mcq7_0 = 0xbf,
371 DBG_CLIENT_BLKID_mcq7_1 = 0xc0,
372 DBG_CLIENT_BLKID_uvdi_0 = 0xc1,
373 DBG_CLIENT_BLKID_RESERVED_LAST = 0xc2,
374} DebugBlockId;
375typedef enum DebugBlockId_OLD {
376 DBG_BLOCK_ID_RESERVED = 0x0,
377 DBG_BLOCK_ID_DBG = 0x1,
378 DBG_BLOCK_ID_VMC = 0x2,
379 DBG_BLOCK_ID_PDMA = 0x3,
380 DBG_BLOCK_ID_CG = 0x4,
381 DBG_BLOCK_ID_SRBM = 0x5,
382 DBG_BLOCK_ID_GRBM = 0x6,
383 DBG_BLOCK_ID_RLC = 0x7,
384 DBG_BLOCK_ID_CSC = 0x8,
385 DBG_BLOCK_ID_SEM = 0x9,
386 DBG_BLOCK_ID_IH = 0xa,
387 DBG_BLOCK_ID_SC = 0xb,
388 DBG_BLOCK_ID_SQ = 0xc,
389 DBG_BLOCK_ID_AVP = 0xd,
390 DBG_BLOCK_ID_GMCON = 0xe,
391 DBG_BLOCK_ID_SMU = 0xf,
392 DBG_BLOCK_ID_DMA0 = 0x10,
393 DBG_BLOCK_ID_DMA1 = 0x11,
394 DBG_BLOCK_ID_SPIM = 0x12,
395 DBG_BLOCK_ID_GDS = 0x13,
396 DBG_BLOCK_ID_SPIS = 0x14,
397 DBG_BLOCK_ID_UNUSED0 = 0x15,
398 DBG_BLOCK_ID_PA0 = 0x16,
399 DBG_BLOCK_ID_PA1 = 0x17,
400 DBG_BLOCK_ID_CP0 = 0x18,
401 DBG_BLOCK_ID_CP1 = 0x19,
402 DBG_BLOCK_ID_CP2 = 0x1a,
403 DBG_BLOCK_ID_UNUSED1 = 0x1b,
404 DBG_BLOCK_ID_UVDU = 0x1c,
405 DBG_BLOCK_ID_UVDM = 0x1d,
406 DBG_BLOCK_ID_VCE = 0x1e,
407 DBG_BLOCK_ID_UNUSED2 = 0x1f,
408 DBG_BLOCK_ID_VGT0 = 0x20,
409 DBG_BLOCK_ID_VGT1 = 0x21,
410 DBG_BLOCK_ID_IA = 0x22,
411 DBG_BLOCK_ID_UNUSED3 = 0x23,
412 DBG_BLOCK_ID_SCT0 = 0x24,
413 DBG_BLOCK_ID_SCT1 = 0x25,
414 DBG_BLOCK_ID_SPM0 = 0x26,
415 DBG_BLOCK_ID_SPM1 = 0x27,
416 DBG_BLOCK_ID_TCAA = 0x28,
417 DBG_BLOCK_ID_TCAB = 0x29,
418 DBG_BLOCK_ID_TCCA = 0x2a,
419 DBG_BLOCK_ID_TCCB = 0x2b,
420 DBG_BLOCK_ID_MCC0 = 0x2c,
421 DBG_BLOCK_ID_MCC1 = 0x2d,
422 DBG_BLOCK_ID_MCC2 = 0x2e,
423 DBG_BLOCK_ID_MCC3 = 0x2f,
424 DBG_BLOCK_ID_SX0 = 0x30,
425 DBG_BLOCK_ID_SX1 = 0x31,
426 DBG_BLOCK_ID_SX2 = 0x32,
427 DBG_BLOCK_ID_SX3 = 0x33,
428 DBG_BLOCK_ID_UNUSED4 = 0x34,
429 DBG_BLOCK_ID_UNUSED5 = 0x35,
430 DBG_BLOCK_ID_UNUSED6 = 0x36,
431 DBG_BLOCK_ID_UNUSED7 = 0x37,
432 DBG_BLOCK_ID_PC0 = 0x38,
433 DBG_BLOCK_ID_PC1 = 0x39,
434 DBG_BLOCK_ID_UNUSED8 = 0x3a,
435 DBG_BLOCK_ID_UNUSED9 = 0x3b,
436 DBG_BLOCK_ID_UNUSED10 = 0x3c,
437 DBG_BLOCK_ID_UNUSED11 = 0x3d,
438 DBG_BLOCK_ID_MCB = 0x3e,
439 DBG_BLOCK_ID_UNUSED12 = 0x3f,
440 DBG_BLOCK_ID_SCB0 = 0x40,
441 DBG_BLOCK_ID_SCB1 = 0x41,
442 DBG_BLOCK_ID_UNUSED13 = 0x42,
443 DBG_BLOCK_ID_UNUSED14 = 0x43,
444 DBG_BLOCK_ID_SCF0 = 0x44,
445 DBG_BLOCK_ID_SCF1 = 0x45,
446 DBG_BLOCK_ID_UNUSED15 = 0x46,
447 DBG_BLOCK_ID_UNUSED16 = 0x47,
448 DBG_BLOCK_ID_BCI0 = 0x48,
449 DBG_BLOCK_ID_BCI1 = 0x49,
450 DBG_BLOCK_ID_BCI2 = 0x4a,
451 DBG_BLOCK_ID_BCI3 = 0x4b,
452 DBG_BLOCK_ID_UNUSED17 = 0x4c,
453 DBG_BLOCK_ID_UNUSED18 = 0x4d,
454 DBG_BLOCK_ID_UNUSED19 = 0x4e,
455 DBG_BLOCK_ID_UNUSED20 = 0x4f,
456 DBG_BLOCK_ID_CB00 = 0x50,
457 DBG_BLOCK_ID_CB01 = 0x51,
458 DBG_BLOCK_ID_CB02 = 0x52,
459 DBG_BLOCK_ID_CB03 = 0x53,
460 DBG_BLOCK_ID_CB04 = 0x54,
461 DBG_BLOCK_ID_UNUSED21 = 0x55,
462 DBG_BLOCK_ID_UNUSED22 = 0x56,
463 DBG_BLOCK_ID_UNUSED23 = 0x57,
464 DBG_BLOCK_ID_CB10 = 0x58,
465 DBG_BLOCK_ID_CB11 = 0x59,
466 DBG_BLOCK_ID_CB12 = 0x5a,
467 DBG_BLOCK_ID_CB13 = 0x5b,
468 DBG_BLOCK_ID_CB14 = 0x5c,
469 DBG_BLOCK_ID_UNUSED24 = 0x5d,
470 DBG_BLOCK_ID_UNUSED25 = 0x5e,
471 DBG_BLOCK_ID_UNUSED26 = 0x5f,
472 DBG_BLOCK_ID_TCP0 = 0x60,
473 DBG_BLOCK_ID_TCP1 = 0x61,
474 DBG_BLOCK_ID_TCP2 = 0x62,
475 DBG_BLOCK_ID_TCP3 = 0x63,
476 DBG_BLOCK_ID_TCP4 = 0x64,
477 DBG_BLOCK_ID_TCP5 = 0x65,
478 DBG_BLOCK_ID_TCP6 = 0x66,
479 DBG_BLOCK_ID_TCP7 = 0x67,
480 DBG_BLOCK_ID_TCP8 = 0x68,
481 DBG_BLOCK_ID_TCP9 = 0x69,
482 DBG_BLOCK_ID_TCP10 = 0x6a,
483 DBG_BLOCK_ID_TCP11 = 0x6b,
484 DBG_BLOCK_ID_TCP12 = 0x6c,
485 DBG_BLOCK_ID_TCP13 = 0x6d,
486 DBG_BLOCK_ID_TCP14 = 0x6e,
487 DBG_BLOCK_ID_TCP15 = 0x6f,
488 DBG_BLOCK_ID_TCP16 = 0x70,
489 DBG_BLOCK_ID_TCP17 = 0x71,
490 DBG_BLOCK_ID_TCP18 = 0x72,
491 DBG_BLOCK_ID_TCP19 = 0x73,
492 DBG_BLOCK_ID_TCP20 = 0x74,
493 DBG_BLOCK_ID_TCP21 = 0x75,
494 DBG_BLOCK_ID_TCP22 = 0x76,
495 DBG_BLOCK_ID_TCP23 = 0x77,
496 DBG_BLOCK_ID_TCP_RESERVED0 = 0x78,
497 DBG_BLOCK_ID_TCP_RESERVED1 = 0x79,
498 DBG_BLOCK_ID_TCP_RESERVED2 = 0x7a,
499 DBG_BLOCK_ID_TCP_RESERVED3 = 0x7b,
500 DBG_BLOCK_ID_TCP_RESERVED4 = 0x7c,
501 DBG_BLOCK_ID_TCP_RESERVED5 = 0x7d,
502 DBG_BLOCK_ID_TCP_RESERVED6 = 0x7e,
503 DBG_BLOCK_ID_TCP_RESERVED7 = 0x7f,
504 DBG_BLOCK_ID_DB00 = 0x80,
505 DBG_BLOCK_ID_DB01 = 0x81,
506 DBG_BLOCK_ID_DB02 = 0x82,
507 DBG_BLOCK_ID_DB03 = 0x83,
508 DBG_BLOCK_ID_DB04 = 0x84,
509 DBG_BLOCK_ID_UNUSED27 = 0x85,
510 DBG_BLOCK_ID_UNUSED28 = 0x86,
511 DBG_BLOCK_ID_UNUSED29 = 0x87,
512 DBG_BLOCK_ID_DB10 = 0x88,
513 DBG_BLOCK_ID_DB11 = 0x89,
514 DBG_BLOCK_ID_DB12 = 0x8a,
515 DBG_BLOCK_ID_DB13 = 0x8b,
516 DBG_BLOCK_ID_DB14 = 0x8c,
517 DBG_BLOCK_ID_UNUSED30 = 0x8d,
518 DBG_BLOCK_ID_UNUSED31 = 0x8e,
519 DBG_BLOCK_ID_UNUSED32 = 0x8f,
520 DBG_BLOCK_ID_TCC0 = 0x90,
521 DBG_BLOCK_ID_TCC1 = 0x91,
522 DBG_BLOCK_ID_TCC2 = 0x92,
523 DBG_BLOCK_ID_TCC3 = 0x93,
524 DBG_BLOCK_ID_TCC4 = 0x94,
525 DBG_BLOCK_ID_TCC5 = 0x95,
526 DBG_BLOCK_ID_TCC6 = 0x96,
527 DBG_BLOCK_ID_TCC7 = 0x97,
528 DBG_BLOCK_ID_SPS00 = 0x98,
529 DBG_BLOCK_ID_SPS01 = 0x99,
530 DBG_BLOCK_ID_SPS02 = 0x9a,
531 DBG_BLOCK_ID_SPS10 = 0x9b,
532 DBG_BLOCK_ID_SPS11 = 0x9c,
533 DBG_BLOCK_ID_SPS12 = 0x9d,
534 DBG_BLOCK_ID_UNUSED33 = 0x9e,
535 DBG_BLOCK_ID_UNUSED34 = 0x9f,
536 DBG_BLOCK_ID_TA00 = 0xa0,
537 DBG_BLOCK_ID_TA01 = 0xa1,
538 DBG_BLOCK_ID_TA02 = 0xa2,
539 DBG_BLOCK_ID_TA03 = 0xa3,
540 DBG_BLOCK_ID_TA04 = 0xa4,
541 DBG_BLOCK_ID_TA05 = 0xa5,
542 DBG_BLOCK_ID_TA06 = 0xa6,
543 DBG_BLOCK_ID_TA07 = 0xa7,
544 DBG_BLOCK_ID_TA08 = 0xa8,
545 DBG_BLOCK_ID_TA09 = 0xa9,
546 DBG_BLOCK_ID_TA0A = 0xaa,
547 DBG_BLOCK_ID_TA0B = 0xab,
548 DBG_BLOCK_ID_UNUSED35 = 0xac,
549 DBG_BLOCK_ID_UNUSED36 = 0xad,
550 DBG_BLOCK_ID_UNUSED37 = 0xae,
551 DBG_BLOCK_ID_UNUSED38 = 0xaf,
552 DBG_BLOCK_ID_TA10 = 0xb0,
553 DBG_BLOCK_ID_TA11 = 0xb1,
554 DBG_BLOCK_ID_TA12 = 0xb2,
555 DBG_BLOCK_ID_TA13 = 0xb3,
556 DBG_BLOCK_ID_TA14 = 0xb4,
557 DBG_BLOCK_ID_TA15 = 0xb5,
558 DBG_BLOCK_ID_TA16 = 0xb6,
559 DBG_BLOCK_ID_TA17 = 0xb7,
560 DBG_BLOCK_ID_TA18 = 0xb8,
561 DBG_BLOCK_ID_TA19 = 0xb9,
562 DBG_BLOCK_ID_TA1A = 0xba,
563 DBG_BLOCK_ID_TA1B = 0xbb,
564 DBG_BLOCK_ID_UNUSED39 = 0xbc,
565 DBG_BLOCK_ID_UNUSED40 = 0xbd,
566 DBG_BLOCK_ID_UNUSED41 = 0xbe,
567 DBG_BLOCK_ID_UNUSED42 = 0xbf,
568 DBG_BLOCK_ID_TD00 = 0xc0,
569 DBG_BLOCK_ID_TD01 = 0xc1,
570 DBG_BLOCK_ID_TD02 = 0xc2,
571 DBG_BLOCK_ID_TD03 = 0xc3,
572 DBG_BLOCK_ID_TD04 = 0xc4,
573 DBG_BLOCK_ID_TD05 = 0xc5,
574 DBG_BLOCK_ID_TD06 = 0xc6,
575 DBG_BLOCK_ID_TD07 = 0xc7,
576 DBG_BLOCK_ID_TD08 = 0xc8,
577 DBG_BLOCK_ID_TD09 = 0xc9,
578 DBG_BLOCK_ID_TD0A = 0xca,
579 DBG_BLOCK_ID_TD0B = 0xcb,
580 DBG_BLOCK_ID_UNUSED43 = 0xcc,
581 DBG_BLOCK_ID_UNUSED44 = 0xcd,
582 DBG_BLOCK_ID_UNUSED45 = 0xce,
583 DBG_BLOCK_ID_UNUSED46 = 0xcf,
584 DBG_BLOCK_ID_TD10 = 0xd0,
585 DBG_BLOCK_ID_TD11 = 0xd1,
586 DBG_BLOCK_ID_TD12 = 0xd2,
587 DBG_BLOCK_ID_TD13 = 0xd3,
588 DBG_BLOCK_ID_TD14 = 0xd4,
589 DBG_BLOCK_ID_TD15 = 0xd5,
590 DBG_BLOCK_ID_TD16 = 0xd6,
591 DBG_BLOCK_ID_TD17 = 0xd7,
592 DBG_BLOCK_ID_TD18 = 0xd8,
593 DBG_BLOCK_ID_TD19 = 0xd9,
594 DBG_BLOCK_ID_TD1A = 0xda,
595 DBG_BLOCK_ID_TD1B = 0xdb,
596 DBG_BLOCK_ID_UNUSED47 = 0xdc,
597 DBG_BLOCK_ID_UNUSED48 = 0xdd,
598 DBG_BLOCK_ID_UNUSED49 = 0xde,
599 DBG_BLOCK_ID_UNUSED50 = 0xdf,
600 DBG_BLOCK_ID_MCD0 = 0xe0,
601 DBG_BLOCK_ID_MCD1 = 0xe1,
602 DBG_BLOCK_ID_MCD2 = 0xe2,
603 DBG_BLOCK_ID_MCD3 = 0xe3,
604 DBG_BLOCK_ID_MCD4 = 0xe4,
605 DBG_BLOCK_ID_MCD5 = 0xe5,
606 DBG_BLOCK_ID_UNUSED51 = 0xe6,
607 DBG_BLOCK_ID_UNUSED52 = 0xe7,
608} DebugBlockId_OLD;
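/* Note (illustrative, not part of the patch): the DebugBlockId_BY2/_BY4/
 * _BY8/_BY16 enums below list every 2nd/4th/8th/16th block of
 * DebugBlockId_OLD, with each encoding divided by the same factor, e.g.
 * DBG_BLOCK_ID_DMA0: 0x10 -> 0x8 (BY2) -> 0x4 (BY4) -> 0x2 (BY8) -> 0x1 (BY16). */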
609typedef enum DebugBlockId_BY2 {
610 DBG_BLOCK_ID_RESERVED_BY2 = 0x0,
611 DBG_BLOCK_ID_VMC_BY2 = 0x1,
612 DBG_BLOCK_ID_CG_BY2 = 0x2,
613 DBG_BLOCK_ID_GRBM_BY2 = 0x3,
614 DBG_BLOCK_ID_CSC_BY2 = 0x4,
615 DBG_BLOCK_ID_IH_BY2 = 0x5,
616 DBG_BLOCK_ID_SQ_BY2 = 0x6,
617 DBG_BLOCK_ID_GMCON_BY2 = 0x7,
618 DBG_BLOCK_ID_DMA0_BY2 = 0x8,
619 DBG_BLOCK_ID_SPIM_BY2 = 0x9,
620 DBG_BLOCK_ID_SPIS_BY2 = 0xa,
621 DBG_BLOCK_ID_PA0_BY2 = 0xb,
622 DBG_BLOCK_ID_CP0_BY2 = 0xc,
623 DBG_BLOCK_ID_CP2_BY2 = 0xd,
624 DBG_BLOCK_ID_UVDU_BY2 = 0xe,
625 DBG_BLOCK_ID_VCE_BY2 = 0xf,
626 DBG_BLOCK_ID_VGT0_BY2 = 0x10,
627 DBG_BLOCK_ID_IA_BY2 = 0x11,
628 DBG_BLOCK_ID_SCT0_BY2 = 0x12,
629 DBG_BLOCK_ID_SPM0_BY2 = 0x13,
630 DBG_BLOCK_ID_TCAA_BY2 = 0x14,
631 DBG_BLOCK_ID_TCCA_BY2 = 0x15,
632 DBG_BLOCK_ID_MCC0_BY2 = 0x16,
633 DBG_BLOCK_ID_MCC2_BY2 = 0x17,
634 DBG_BLOCK_ID_SX0_BY2 = 0x18,
635 DBG_BLOCK_ID_SX2_BY2 = 0x19,
636 DBG_BLOCK_ID_UNUSED4_BY2 = 0x1a,
637 DBG_BLOCK_ID_UNUSED6_BY2 = 0x1b,
638 DBG_BLOCK_ID_PC0_BY2 = 0x1c,
639 DBG_BLOCK_ID_UNUSED8_BY2 = 0x1d,
640 DBG_BLOCK_ID_UNUSED10_BY2 = 0x1e,
641 DBG_BLOCK_ID_MCB_BY2 = 0x1f,
642 DBG_BLOCK_ID_SCB0_BY2 = 0x20,
643 DBG_BLOCK_ID_UNUSED13_BY2 = 0x21,
644 DBG_BLOCK_ID_SCF0_BY2 = 0x22,
645 DBG_BLOCK_ID_UNUSED15_BY2 = 0x23,
646 DBG_BLOCK_ID_BCI0_BY2 = 0x24,
647 DBG_BLOCK_ID_BCI2_BY2 = 0x25,
648 DBG_BLOCK_ID_UNUSED17_BY2 = 0x26,
649 DBG_BLOCK_ID_UNUSED19_BY2 = 0x27,
650 DBG_BLOCK_ID_CB00_BY2 = 0x28,
651 DBG_BLOCK_ID_CB02_BY2 = 0x29,
652 DBG_BLOCK_ID_CB04_BY2 = 0x2a,
653 DBG_BLOCK_ID_UNUSED22_BY2 = 0x2b,
654 DBG_BLOCK_ID_CB10_BY2 = 0x2c,
655 DBG_BLOCK_ID_CB12_BY2 = 0x2d,
656 DBG_BLOCK_ID_CB14_BY2 = 0x2e,
657 DBG_BLOCK_ID_UNUSED25_BY2 = 0x2f,
658 DBG_BLOCK_ID_TCP0_BY2 = 0x30,
659 DBG_BLOCK_ID_TCP2_BY2 = 0x31,
660 DBG_BLOCK_ID_TCP4_BY2 = 0x32,
661 DBG_BLOCK_ID_TCP6_BY2 = 0x33,
662 DBG_BLOCK_ID_TCP8_BY2 = 0x34,
663 DBG_BLOCK_ID_TCP10_BY2 = 0x35,
664 DBG_BLOCK_ID_TCP12_BY2 = 0x36,
665 DBG_BLOCK_ID_TCP14_BY2 = 0x37,
666 DBG_BLOCK_ID_TCP16_BY2 = 0x38,
667 DBG_BLOCK_ID_TCP18_BY2 = 0x39,
668 DBG_BLOCK_ID_TCP20_BY2 = 0x3a,
669 DBG_BLOCK_ID_TCP22_BY2 = 0x3b,
670 DBG_BLOCK_ID_TCP_RESERVED0_BY2 = 0x3c,
671 DBG_BLOCK_ID_TCP_RESERVED2_BY2 = 0x3d,
672 DBG_BLOCK_ID_TCP_RESERVED4_BY2 = 0x3e,
673 DBG_BLOCK_ID_TCP_RESERVED6_BY2 = 0x3f,
674 DBG_BLOCK_ID_DB00_BY2 = 0x40,
675 DBG_BLOCK_ID_DB02_BY2 = 0x41,
676 DBG_BLOCK_ID_DB04_BY2 = 0x42,
677 DBG_BLOCK_ID_UNUSED28_BY2 = 0x43,
678 DBG_BLOCK_ID_DB10_BY2 = 0x44,
679 DBG_BLOCK_ID_DB12_BY2 = 0x45,
680 DBG_BLOCK_ID_DB14_BY2 = 0x46,
681 DBG_BLOCK_ID_UNUSED31_BY2 = 0x47,
682 DBG_BLOCK_ID_TCC0_BY2 = 0x48,
683 DBG_BLOCK_ID_TCC2_BY2 = 0x49,
684 DBG_BLOCK_ID_TCC4_BY2 = 0x4a,
685 DBG_BLOCK_ID_TCC6_BY2 = 0x4b,
686 DBG_BLOCK_ID_SPS00_BY2 = 0x4c,
687 DBG_BLOCK_ID_SPS02_BY2 = 0x4d,
688 DBG_BLOCK_ID_SPS11_BY2 = 0x4e,
689 DBG_BLOCK_ID_UNUSED33_BY2 = 0x4f,
690 DBG_BLOCK_ID_TA00_BY2 = 0x50,
691 DBG_BLOCK_ID_TA02_BY2 = 0x51,
692 DBG_BLOCK_ID_TA04_BY2 = 0x52,
693 DBG_BLOCK_ID_TA06_BY2 = 0x53,
694 DBG_BLOCK_ID_TA08_BY2 = 0x54,
695 DBG_BLOCK_ID_TA0A_BY2 = 0x55,
696 DBG_BLOCK_ID_UNUSED35_BY2 = 0x56,
697 DBG_BLOCK_ID_UNUSED37_BY2 = 0x57,
698 DBG_BLOCK_ID_TA10_BY2 = 0x58,
699 DBG_BLOCK_ID_TA12_BY2 = 0x59,
700 DBG_BLOCK_ID_TA14_BY2 = 0x5a,
701 DBG_BLOCK_ID_TA16_BY2 = 0x5b,
702 DBG_BLOCK_ID_TA18_BY2 = 0x5c,
703 DBG_BLOCK_ID_TA1A_BY2 = 0x5d,
704 DBG_BLOCK_ID_UNUSED39_BY2 = 0x5e,
705 DBG_BLOCK_ID_UNUSED41_BY2 = 0x5f,
706 DBG_BLOCK_ID_TD00_BY2 = 0x60,
707 DBG_BLOCK_ID_TD02_BY2 = 0x61,
708 DBG_BLOCK_ID_TD04_BY2 = 0x62,
709 DBG_BLOCK_ID_TD06_BY2 = 0x63,
710 DBG_BLOCK_ID_TD08_BY2 = 0x64,
711 DBG_BLOCK_ID_TD0A_BY2 = 0x65,
712 DBG_BLOCK_ID_UNUSED43_BY2 = 0x66,
713 DBG_BLOCK_ID_UNUSED45_BY2 = 0x67,
714 DBG_BLOCK_ID_TD10_BY2 = 0x68,
715 DBG_BLOCK_ID_TD12_BY2 = 0x69,
716 DBG_BLOCK_ID_TD14_BY2 = 0x6a,
717 DBG_BLOCK_ID_TD16_BY2 = 0x6b,
718 DBG_BLOCK_ID_TD18_BY2 = 0x6c,
719 DBG_BLOCK_ID_TD1A_BY2 = 0x6d,
720 DBG_BLOCK_ID_UNUSED47_BY2 = 0x6e,
721 DBG_BLOCK_ID_UNUSED49_BY2 = 0x6f,
722 DBG_BLOCK_ID_MCD0_BY2 = 0x70,
723 DBG_BLOCK_ID_MCD2_BY2 = 0x71,
724 DBG_BLOCK_ID_MCD4_BY2 = 0x72,
725 DBG_BLOCK_ID_UNUSED51_BY2 = 0x73,
726} DebugBlockId_BY2;
727typedef enum DebugBlockId_BY4 {
728 DBG_BLOCK_ID_RESERVED_BY4 = 0x0,
729 DBG_BLOCK_ID_CG_BY4 = 0x1,
730 DBG_BLOCK_ID_CSC_BY4 = 0x2,
731 DBG_BLOCK_ID_SQ_BY4 = 0x3,
732 DBG_BLOCK_ID_DMA0_BY4 = 0x4,
733 DBG_BLOCK_ID_SPIS_BY4 = 0x5,
734 DBG_BLOCK_ID_CP0_BY4 = 0x6,
735 DBG_BLOCK_ID_UVDU_BY4 = 0x7,
736 DBG_BLOCK_ID_VGT0_BY4 = 0x8,
737 DBG_BLOCK_ID_SCT0_BY4 = 0x9,
738 DBG_BLOCK_ID_TCAA_BY4 = 0xa,
739 DBG_BLOCK_ID_MCC0_BY4 = 0xb,
740 DBG_BLOCK_ID_SX0_BY4 = 0xc,
741 DBG_BLOCK_ID_UNUSED4_BY4 = 0xd,
742 DBG_BLOCK_ID_PC0_BY4 = 0xe,
743 DBG_BLOCK_ID_UNUSED10_BY4 = 0xf,
744 DBG_BLOCK_ID_SCB0_BY4 = 0x10,
745 DBG_BLOCK_ID_SCF0_BY4 = 0x11,
746 DBG_BLOCK_ID_BCI0_BY4 = 0x12,
747 DBG_BLOCK_ID_UNUSED17_BY4 = 0x13,
748 DBG_BLOCK_ID_CB00_BY4 = 0x14,
749 DBG_BLOCK_ID_CB04_BY4 = 0x15,
750 DBG_BLOCK_ID_CB10_BY4 = 0x16,
751 DBG_BLOCK_ID_CB14_BY4 = 0x17,
752 DBG_BLOCK_ID_TCP0_BY4 = 0x18,
753 DBG_BLOCK_ID_TCP4_BY4 = 0x19,
754 DBG_BLOCK_ID_TCP8_BY4 = 0x1a,
755 DBG_BLOCK_ID_TCP12_BY4 = 0x1b,
756 DBG_BLOCK_ID_TCP16_BY4 = 0x1c,
757 DBG_BLOCK_ID_TCP20_BY4 = 0x1d,
758 DBG_BLOCK_ID_TCP_RESERVED0_BY4 = 0x1e,
759 DBG_BLOCK_ID_TCP_RESERVED4_BY4 = 0x1f,
760 DBG_BLOCK_ID_DB_BY4 = 0x20,
761 DBG_BLOCK_ID_DB04_BY4 = 0x21,
762 DBG_BLOCK_ID_DB10_BY4 = 0x22,
763 DBG_BLOCK_ID_DB14_BY4 = 0x23,
764 DBG_BLOCK_ID_TCC0_BY4 = 0x24,
765 DBG_BLOCK_ID_TCC4_BY4 = 0x25,
766 DBG_BLOCK_ID_SPS00_BY4 = 0x26,
767 DBG_BLOCK_ID_SPS11_BY4 = 0x27,
768 DBG_BLOCK_ID_TA00_BY4 = 0x28,
769 DBG_BLOCK_ID_TA04_BY4 = 0x29,
770 DBG_BLOCK_ID_TA08_BY4 = 0x2a,
771 DBG_BLOCK_ID_UNUSED35_BY4 = 0x2b,
772 DBG_BLOCK_ID_TA10_BY4 = 0x2c,
773 DBG_BLOCK_ID_TA14_BY4 = 0x2d,
774 DBG_BLOCK_ID_TA18_BY4 = 0x2e,
775 DBG_BLOCK_ID_UNUSED39_BY4 = 0x2f,
776 DBG_BLOCK_ID_TD00_BY4 = 0x30,
777 DBG_BLOCK_ID_TD04_BY4 = 0x31,
778 DBG_BLOCK_ID_TD08_BY4 = 0x32,
779 DBG_BLOCK_ID_UNUSED43_BY4 = 0x33,
780 DBG_BLOCK_ID_TD10_BY4 = 0x34,
781 DBG_BLOCK_ID_TD14_BY4 = 0x35,
782 DBG_BLOCK_ID_TD18_BY4 = 0x36,
783 DBG_BLOCK_ID_UNUSED47_BY4 = 0x37,
784 DBG_BLOCK_ID_MCD0_BY4 = 0x38,
785 DBG_BLOCK_ID_MCD4_BY4 = 0x39,
786} DebugBlockId_BY4;
787typedef enum DebugBlockId_BY8 {
788 DBG_BLOCK_ID_RESERVED_BY8 = 0x0,
789 DBG_BLOCK_ID_CSC_BY8 = 0x1,
790 DBG_BLOCK_ID_DMA0_BY8 = 0x2,
791 DBG_BLOCK_ID_CP0_BY8 = 0x3,
792 DBG_BLOCK_ID_VGT0_BY8 = 0x4,
793 DBG_BLOCK_ID_TCAA_BY8 = 0x5,
794 DBG_BLOCK_ID_SX0_BY8 = 0x6,
795 DBG_BLOCK_ID_PC0_BY8 = 0x7,
796 DBG_BLOCK_ID_SCB0_BY8 = 0x8,
797 DBG_BLOCK_ID_BCI0_BY8 = 0x9,
798 DBG_BLOCK_ID_CB00_BY8 = 0xa,
799 DBG_BLOCK_ID_CB10_BY8 = 0xb,
800 DBG_BLOCK_ID_TCP0_BY8 = 0xc,
801 DBG_BLOCK_ID_TCP8_BY8 = 0xd,
802 DBG_BLOCK_ID_TCP16_BY8 = 0xe,
803 DBG_BLOCK_ID_TCP_RESERVED0_BY8 = 0xf,
804 DBG_BLOCK_ID_DB00_BY8 = 0x10,
805 DBG_BLOCK_ID_DB10_BY8 = 0x11,
806 DBG_BLOCK_ID_TCC0_BY8 = 0x12,
807 DBG_BLOCK_ID_SPS00_BY8 = 0x13,
808 DBG_BLOCK_ID_TA00_BY8 = 0x14,
809 DBG_BLOCK_ID_TA08_BY8 = 0x15,
810 DBG_BLOCK_ID_TA10_BY8 = 0x16,
811 DBG_BLOCK_ID_TA18_BY8 = 0x17,
812 DBG_BLOCK_ID_TD00_BY8 = 0x18,
813 DBG_BLOCK_ID_TD08_BY8 = 0x19,
814 DBG_BLOCK_ID_TD10_BY8 = 0x1a,
815 DBG_BLOCK_ID_TD18_BY8 = 0x1b,
816 DBG_BLOCK_ID_MCD0_BY8 = 0x1c,
817} DebugBlockId_BY8;
818typedef enum DebugBlockId_BY16 {
819 DBG_BLOCK_ID_RESERVED_BY16 = 0x0,
820 DBG_BLOCK_ID_DMA0_BY16 = 0x1,
821 DBG_BLOCK_ID_VGT0_BY16 = 0x2,
822 DBG_BLOCK_ID_SX0_BY16 = 0x3,
823 DBG_BLOCK_ID_SCB0_BY16 = 0x4,
824 DBG_BLOCK_ID_CB00_BY16 = 0x5,
825 DBG_BLOCK_ID_TCP0_BY16 = 0x6,
826 DBG_BLOCK_ID_TCP16_BY16 = 0x7,
827 DBG_BLOCK_ID_DB00_BY16 = 0x8,
828 DBG_BLOCK_ID_TCC0_BY16 = 0x9,
829 DBG_BLOCK_ID_TA00_BY16 = 0xa,
830 DBG_BLOCK_ID_TA10_BY16 = 0xb,
831 DBG_BLOCK_ID_TD00_BY16 = 0xc,
832 DBG_BLOCK_ID_TD10_BY16 = 0xd,
833 DBG_BLOCK_ID_MCD0_BY16 = 0xe,
834} DebugBlockId_BY16;
835typedef enum ColorTransform {
836 DCC_CT_AUTO = 0x0,
837 DCC_CT_NONE = 0x1,
838 ABGR_TO_A_BG_G_RB = 0x2,
839 BGRA_TO_BG_G_RB_A = 0x3,
840} ColorTransform;
841typedef enum CompareRef {
842 REF_NEVER = 0x0,
843 REF_LESS = 0x1,
844 REF_EQUAL = 0x2,
845 REF_LEQUAL = 0x3,
846 REF_GREATER = 0x4,
847 REF_NOTEQUAL = 0x5,
848 REF_GEQUAL = 0x6,
849 REF_ALWAYS = 0x7,
850} CompareRef;
851typedef enum ReadSize {
852 READ_256_BITS = 0x0,
853 READ_512_BITS = 0x1,
854} ReadSize;
855typedef enum DepthFormat {
856 DEPTH_INVALID = 0x0,
857 DEPTH_16 = 0x1,
858 DEPTH_X8_24 = 0x2,
859 DEPTH_8_24 = 0x3,
860 DEPTH_X8_24_FLOAT = 0x4,
861 DEPTH_8_24_FLOAT = 0x5,
862 DEPTH_32_FLOAT = 0x6,
863 DEPTH_X24_8_32_FLOAT = 0x7,
864} DepthFormat;
865typedef enum ZFormat {
866 Z_INVALID = 0x0,
867 Z_16 = 0x1,
868 Z_24 = 0x2,
869 Z_32_FLOAT = 0x3,
870} ZFormat;
871typedef enum StencilFormat {
872 STENCIL_INVALID = 0x0,
873 STENCIL_8 = 0x1,
874} StencilFormat;
875typedef enum CmaskMode {
876 CMASK_CLEAR_NONE = 0x0,
877 CMASK_CLEAR_ONE = 0x1,
878 CMASK_CLEAR_ALL = 0x2,
879 CMASK_ANY_EXPANDED = 0x3,
880 CMASK_ALPHA0_FRAG1 = 0x4,
881 CMASK_ALPHA0_FRAG2 = 0x5,
882 CMASK_ALPHA0_FRAG4 = 0x6,
883 CMASK_ALPHA0_FRAGS = 0x7,
884 CMASK_ALPHA1_FRAG1 = 0x8,
885 CMASK_ALPHA1_FRAG2 = 0x9,
886 CMASK_ALPHA1_FRAG4 = 0xa,
887 CMASK_ALPHA1_FRAGS = 0xb,
888 CMASK_ALPHAX_FRAG1 = 0xc,
889 CMASK_ALPHAX_FRAG2 = 0xd,
890 CMASK_ALPHAX_FRAG4 = 0xe,
891 CMASK_ALPHAX_FRAGS = 0xf,
892} CmaskMode;
893typedef enum QuadExportFormat {
894 EXPORT_UNUSED = 0x0,
895 EXPORT_32_R = 0x1,
896 EXPORT_32_GR = 0x2,
897 EXPORT_32_AR = 0x3,
898 EXPORT_FP16_ABGR = 0x4,
899 EXPORT_UNSIGNED16_ABGR = 0x5,
900 EXPORT_SIGNED16_ABGR = 0x6,
901 EXPORT_32_ABGR = 0x7,
902} QuadExportFormat;
903typedef enum QuadExportFormatOld {
904 EXPORT_4P_32BPC_ABGR = 0x0,
905 EXPORT_4P_16BPC_ABGR = 0x1,
906 EXPORT_4P_32BPC_GR = 0x2,
907 EXPORT_4P_32BPC_AR = 0x3,
908 EXPORT_2P_32BPC_ABGR = 0x4,
909 EXPORT_8P_32BPC_R = 0x5,
910} QuadExportFormatOld;
911typedef enum ColorFormat {
912 COLOR_INVALID = 0x0,
913 COLOR_8 = 0x1,
914 COLOR_16 = 0x2,
915 COLOR_8_8 = 0x3,
916 COLOR_32 = 0x4,
917 COLOR_16_16 = 0x5,
918 COLOR_10_11_11 = 0x6,
919 COLOR_11_11_10 = 0x7,
920 COLOR_10_10_10_2 = 0x8,
921 COLOR_2_10_10_10 = 0x9,
922 COLOR_8_8_8_8 = 0xa,
923 COLOR_32_32 = 0xb,
924 COLOR_16_16_16_16 = 0xc,
925 COLOR_RESERVED_13 = 0xd,
926 COLOR_32_32_32_32 = 0xe,
927 COLOR_RESERVED_15 = 0xf,
928 COLOR_5_6_5 = 0x10,
929 COLOR_1_5_5_5 = 0x11,
930 COLOR_5_5_5_1 = 0x12,
931 COLOR_4_4_4_4 = 0x13,
932 COLOR_8_24 = 0x14,
933 COLOR_24_8 = 0x15,
934 COLOR_X24_8_32_FLOAT = 0x16,
935 COLOR_RESERVED_23 = 0x17,
936} ColorFormat;
937typedef enum SurfaceFormat {
938 FMT_INVALID = 0x0,
939 FMT_8 = 0x1,
940 FMT_16 = 0x2,
941 FMT_8_8 = 0x3,
942 FMT_32 = 0x4,
943 FMT_16_16 = 0x5,
944 FMT_10_11_11 = 0x6,
945 FMT_11_11_10 = 0x7,
946 FMT_10_10_10_2 = 0x8,
947 FMT_2_10_10_10 = 0x9,
948 FMT_8_8_8_8 = 0xa,
949 FMT_32_32 = 0xb,
950 FMT_16_16_16_16 = 0xc,
951 FMT_32_32_32 = 0xd,
952 FMT_32_32_32_32 = 0xe,
953 FMT_RESERVED_4 = 0xf,
954 FMT_5_6_5 = 0x10,
955 FMT_1_5_5_5 = 0x11,
956 FMT_5_5_5_1 = 0x12,
957 FMT_4_4_4_4 = 0x13,
958 FMT_8_24 = 0x14,
959 FMT_24_8 = 0x15,
960 FMT_X24_8_32_FLOAT = 0x16,
961 FMT_RESERVED_33 = 0x17,
962 FMT_11_11_10_FLOAT = 0x18,
963 FMT_16_FLOAT = 0x19,
964 FMT_32_FLOAT = 0x1a,
965 FMT_16_16_FLOAT = 0x1b,
966 FMT_8_24_FLOAT = 0x1c,
967 FMT_24_8_FLOAT = 0x1d,
968 FMT_32_32_FLOAT = 0x1e,
969 FMT_10_11_11_FLOAT = 0x1f,
970 FMT_16_16_16_16_FLOAT = 0x20,
971 FMT_3_3_2 = 0x21,
972 FMT_6_5_5 = 0x22,
973 FMT_32_32_32_32_FLOAT = 0x23,
974 FMT_RESERVED_36 = 0x24,
975 FMT_1 = 0x25,
976 FMT_1_REVERSED = 0x26,
977 FMT_GB_GR = 0x27,
978 FMT_BG_RG = 0x28,
979 FMT_32_AS_8 = 0x29,
980 FMT_32_AS_8_8 = 0x2a,
981 FMT_5_9_9_9_SHAREDEXP = 0x2b,
982 FMT_8_8_8 = 0x2c,
983 FMT_16_16_16 = 0x2d,
984 FMT_16_16_16_FLOAT = 0x2e,
985 FMT_4_4 = 0x2f,
986 FMT_32_32_32_FLOAT = 0x30,
987 FMT_BC1 = 0x31,
988 FMT_BC2 = 0x32,
989 FMT_BC3 = 0x33,
990 FMT_BC4 = 0x34,
991 FMT_BC5 = 0x35,
992 FMT_BC6 = 0x36,
993 FMT_BC7 = 0x37,
994 FMT_32_AS_32_32_32_32 = 0x38,
995 FMT_APC3 = 0x39,
996 FMT_APC4 = 0x3a,
997 FMT_APC5 = 0x3b,
998 FMT_APC6 = 0x3c,
999 FMT_APC7 = 0x3d,
1000 FMT_CTX1 = 0x3e,
1001 FMT_RESERVED_63 = 0x3f,
1002} SurfaceFormat;
1003typedef enum BUF_DATA_FORMAT {
1004 BUF_DATA_FORMAT_INVALID = 0x0,
1005 BUF_DATA_FORMAT_8 = 0x1,
1006 BUF_DATA_FORMAT_16 = 0x2,
1007 BUF_DATA_FORMAT_8_8 = 0x3,
1008 BUF_DATA_FORMAT_32 = 0x4,
1009 BUF_DATA_FORMAT_16_16 = 0x5,
1010 BUF_DATA_FORMAT_10_11_11 = 0x6,
1011 BUF_DATA_FORMAT_11_11_10 = 0x7,
1012 BUF_DATA_FORMAT_10_10_10_2 = 0x8,
1013 BUF_DATA_FORMAT_2_10_10_10 = 0x9,
1014 BUF_DATA_FORMAT_8_8_8_8 = 0xa,
1015 BUF_DATA_FORMAT_32_32 = 0xb,
1016 BUF_DATA_FORMAT_16_16_16_16 = 0xc,
1017 BUF_DATA_FORMAT_32_32_32 = 0xd,
1018 BUF_DATA_FORMAT_32_32_32_32 = 0xe,
1019 BUF_DATA_FORMAT_RESERVED_15 = 0xf,
1020} BUF_DATA_FORMAT;
1021typedef enum IMG_DATA_FORMAT {
1022 IMG_DATA_FORMAT_INVALID = 0x0,
1023 IMG_DATA_FORMAT_8 = 0x1,
1024 IMG_DATA_FORMAT_16 = 0x2,
1025 IMG_DATA_FORMAT_8_8 = 0x3,
1026 IMG_DATA_FORMAT_32 = 0x4,
1027 IMG_DATA_FORMAT_16_16 = 0x5,
1028 IMG_DATA_FORMAT_10_11_11 = 0x6,
1029 IMG_DATA_FORMAT_11_11_10 = 0x7,
1030 IMG_DATA_FORMAT_10_10_10_2 = 0x8,
1031 IMG_DATA_FORMAT_2_10_10_10 = 0x9,
1032 IMG_DATA_FORMAT_8_8_8_8 = 0xa,
1033 IMG_DATA_FORMAT_32_32 = 0xb,
1034 IMG_DATA_FORMAT_16_16_16_16 = 0xc,
1035 IMG_DATA_FORMAT_32_32_32 = 0xd,
1036 IMG_DATA_FORMAT_32_32_32_32 = 0xe,
1037 IMG_DATA_FORMAT_RESERVED_15 = 0xf,
1038 IMG_DATA_FORMAT_5_6_5 = 0x10,
1039 IMG_DATA_FORMAT_1_5_5_5 = 0x11,
1040 IMG_DATA_FORMAT_5_5_5_1 = 0x12,
1041 IMG_DATA_FORMAT_4_4_4_4 = 0x13,
1042 IMG_DATA_FORMAT_8_24 = 0x14,
1043 IMG_DATA_FORMAT_24_8 = 0x15,
1044 IMG_DATA_FORMAT_X24_8_32 = 0x16,
1045 IMG_DATA_FORMAT_RESERVED_23 = 0x17,
1046 IMG_DATA_FORMAT_RESERVED_24 = 0x18,
1047 IMG_DATA_FORMAT_RESERVED_25 = 0x19,
1048 IMG_DATA_FORMAT_RESERVED_26 = 0x1a,
1049 IMG_DATA_FORMAT_RESERVED_27 = 0x1b,
1050 IMG_DATA_FORMAT_RESERVED_28 = 0x1c,
1051 IMG_DATA_FORMAT_RESERVED_29 = 0x1d,
1052 IMG_DATA_FORMAT_RESERVED_30 = 0x1e,
1053 IMG_DATA_FORMAT_RESERVED_31 = 0x1f,
1054 IMG_DATA_FORMAT_GB_GR = 0x20,
1055 IMG_DATA_FORMAT_BG_RG = 0x21,
1056 IMG_DATA_FORMAT_5_9_9_9 = 0x22,
1057 IMG_DATA_FORMAT_BC1 = 0x23,
1058 IMG_DATA_FORMAT_BC2 = 0x24,
1059 IMG_DATA_FORMAT_BC3 = 0x25,
1060 IMG_DATA_FORMAT_BC4 = 0x26,
1061 IMG_DATA_FORMAT_BC5 = 0x27,
1062 IMG_DATA_FORMAT_BC6 = 0x28,
1063 IMG_DATA_FORMAT_BC7 = 0x29,
1064 IMG_DATA_FORMAT_RESERVED_42 = 0x2a,
1065 IMG_DATA_FORMAT_RESERVED_43 = 0x2b,
1066 IMG_DATA_FORMAT_FMASK8_S2_F1 = 0x2c,
1067 IMG_DATA_FORMAT_FMASK8_S4_F1 = 0x2d,
1068 IMG_DATA_FORMAT_FMASK8_S8_F1 = 0x2e,
1069 IMG_DATA_FORMAT_FMASK8_S2_F2 = 0x2f,
1070 IMG_DATA_FORMAT_FMASK8_S4_F2 = 0x30,
1071 IMG_DATA_FORMAT_FMASK8_S4_F4 = 0x31,
1072 IMG_DATA_FORMAT_FMASK16_S16_F1 = 0x32,
1073 IMG_DATA_FORMAT_FMASK16_S8_F2 = 0x33,
1074 IMG_DATA_FORMAT_FMASK32_S16_F2 = 0x34,
1075 IMG_DATA_FORMAT_FMASK32_S8_F4 = 0x35,
1076 IMG_DATA_FORMAT_FMASK32_S8_F8 = 0x36,
1077 IMG_DATA_FORMAT_FMASK64_S16_F4 = 0x37,
1078 IMG_DATA_FORMAT_FMASK64_S16_F8 = 0x38,
1079 IMG_DATA_FORMAT_4_4 = 0x39,
1080 IMG_DATA_FORMAT_6_5_5 = 0x3a,
1081 IMG_DATA_FORMAT_1 = 0x3b,
1082 IMG_DATA_FORMAT_1_REVERSED = 0x3c,
1083 IMG_DATA_FORMAT_32_AS_8 = 0x3d,
1084 IMG_DATA_FORMAT_32_AS_8_8 = 0x3e,
1085 IMG_DATA_FORMAT_32_AS_32_32_32_32 = 0x3f,
1086} IMG_DATA_FORMAT;
1087typedef enum BUF_NUM_FORMAT {
1088 BUF_NUM_FORMAT_UNORM = 0x0,
1089 BUF_NUM_FORMAT_SNORM = 0x1,
1090 BUF_NUM_FORMAT_USCALED = 0x2,
1091 BUF_NUM_FORMAT_SSCALED = 0x3,
1092 BUF_NUM_FORMAT_UINT = 0x4,
1093 BUF_NUM_FORMAT_SINT = 0x5,
1094 BUF_NUM_FORMAT_RESERVED_6 = 0x6,
1095 BUF_NUM_FORMAT_FLOAT = 0x7,
1096} BUF_NUM_FORMAT;
1097typedef enum IMG_NUM_FORMAT {
1098 IMG_NUM_FORMAT_UNORM = 0x0,
1099 IMG_NUM_FORMAT_SNORM = 0x1,
1100 IMG_NUM_FORMAT_USCALED = 0x2,
1101 IMG_NUM_FORMAT_SSCALED = 0x3,
1102 IMG_NUM_FORMAT_UINT = 0x4,
1103 IMG_NUM_FORMAT_SINT = 0x5,
1104 IMG_NUM_FORMAT_RESERVED_6 = 0x6,
1105 IMG_NUM_FORMAT_FLOAT = 0x7,
1106 IMG_NUM_FORMAT_RESERVED_8 = 0x8,
1107 IMG_NUM_FORMAT_SRGB = 0x9,
1108 IMG_NUM_FORMAT_RESERVED_10 = 0xa,
1109 IMG_NUM_FORMAT_RESERVED_11 = 0xb,
1110 IMG_NUM_FORMAT_RESERVED_12 = 0xc,
1111 IMG_NUM_FORMAT_RESERVED_13 = 0xd,
1112 IMG_NUM_FORMAT_RESERVED_14 = 0xe,
1113 IMG_NUM_FORMAT_RESERVED_15 = 0xf,
1114} IMG_NUM_FORMAT;
1115typedef enum TileType {
1116 ARRAY_COLOR_TILE = 0x0,
1117 ARRAY_DEPTH_TILE = 0x1,
1118} TileType;
1119typedef enum NonDispTilingOrder {
1120 ADDR_SURF_MICRO_TILING_DISPLAY = 0x0,
1121 ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1,
1122} NonDispTilingOrder;
1123typedef enum MicroTileMode {
1124 ADDR_SURF_DISPLAY_MICRO_TILING = 0x0,
1125 ADDR_SURF_THIN_MICRO_TILING = 0x1,
1126 ADDR_SURF_DEPTH_MICRO_TILING = 0x2,
1127 ADDR_SURF_ROTATED_MICRO_TILING = 0x3,
1128 ADDR_SURF_THICK_MICRO_TILING = 0x4,
1129} MicroTileMode;
1130typedef enum TileSplit {
1131 ADDR_SURF_TILE_SPLIT_64B = 0x0,
1132 ADDR_SURF_TILE_SPLIT_128B = 0x1,
1133 ADDR_SURF_TILE_SPLIT_256B = 0x2,
1134 ADDR_SURF_TILE_SPLIT_512B = 0x3,
1135 ADDR_SURF_TILE_SPLIT_1KB = 0x4,
1136 ADDR_SURF_TILE_SPLIT_2KB = 0x5,
1137 ADDR_SURF_TILE_SPLIT_4KB = 0x6,
1138} TileSplit;
1139typedef enum SampleSplit {
1140 ADDR_SURF_SAMPLE_SPLIT_1 = 0x0,
1141 ADDR_SURF_SAMPLE_SPLIT_2 = 0x1,
1142 ADDR_SURF_SAMPLE_SPLIT_4 = 0x2,
1143 ADDR_SURF_SAMPLE_SPLIT_8 = 0x3,
1144} SampleSplit;
1145typedef enum PipeConfig {
1146 ADDR_SURF_P2 = 0x0,
1147 ADDR_SURF_P2_RESERVED0 = 0x1,
1148 ADDR_SURF_P2_RESERVED1 = 0x2,
1149 ADDR_SURF_P2_RESERVED2 = 0x3,
1150 ADDR_SURF_P4_8x16 = 0x4,
1151 ADDR_SURF_P4_16x16 = 0x5,
1152 ADDR_SURF_P4_16x32 = 0x6,
1153 ADDR_SURF_P4_32x32 = 0x7,
1154 ADDR_SURF_P8_16x16_8x16 = 0x8,
1155 ADDR_SURF_P8_16x32_8x16 = 0x9,
1156 ADDR_SURF_P8_32x32_8x16 = 0xa,
1157 ADDR_SURF_P8_16x32_16x16 = 0xb,
1158 ADDR_SURF_P8_32x32_16x16 = 0xc,
1159 ADDR_SURF_P8_32x32_16x32 = 0xd,
1160 ADDR_SURF_P8_32x64_32x32 = 0xe,
1161 ADDR_SURF_P8_RESERVED0 = 0xf,
1162 ADDR_SURF_P16_32x32_8x16 = 0x10,
1163 ADDR_SURF_P16_32x32_16x16 = 0x11,
1164} PipeConfig;
1165typedef enum NumBanks {
1166 ADDR_SURF_2_BANK = 0x0,
1167 ADDR_SURF_4_BANK = 0x1,
1168 ADDR_SURF_8_BANK = 0x2,
1169 ADDR_SURF_16_BANK = 0x3,
1170} NumBanks;
1171typedef enum BankWidth {
1172 ADDR_SURF_BANK_WIDTH_1 = 0x0,
1173 ADDR_SURF_BANK_WIDTH_2 = 0x1,
1174 ADDR_SURF_BANK_WIDTH_4 = 0x2,
1175 ADDR_SURF_BANK_WIDTH_8 = 0x3,
1176} BankWidth;
1177typedef enum BankHeight {
1178 ADDR_SURF_BANK_HEIGHT_1 = 0x0,
1179 ADDR_SURF_BANK_HEIGHT_2 = 0x1,
1180 ADDR_SURF_BANK_HEIGHT_4 = 0x2,
1181 ADDR_SURF_BANK_HEIGHT_8 = 0x3,
1182} BankHeight;
1183typedef enum BankWidthHeight {
1184 ADDR_SURF_BANK_WH_1 = 0x0,
1185 ADDR_SURF_BANK_WH_2 = 0x1,
1186 ADDR_SURF_BANK_WH_4 = 0x2,
1187 ADDR_SURF_BANK_WH_8 = 0x3,
1188} BankWidthHeight;
1189typedef enum MacroTileAspect {
1190 ADDR_SURF_MACRO_ASPECT_1 = 0x0,
1191 ADDR_SURF_MACRO_ASPECT_2 = 0x1,
1192 ADDR_SURF_MACRO_ASPECT_4 = 0x2,
1193 ADDR_SURF_MACRO_ASPECT_8 = 0x3,
1194} MacroTileAspect;
1195typedef enum GATCL1RequestType {
1196 GATCL1_TYPE_NORMAL = 0x0,
1197 GATCL1_TYPE_SHOOTDOWN = 0x1,
1198 GATCL1_TYPE_BYPASS = 0x2,
1199} GATCL1RequestType;
1200typedef enum TCC_CACHE_POLICIES {
1201 TCC_CACHE_POLICY_LRU = 0x0,
1202 TCC_CACHE_POLICY_STREAM = 0x1,
1203} TCC_CACHE_POLICIES;
1204typedef enum MTYPE {
1205 MTYPE_NC_NV = 0x0,
1206 MTYPE_NC = 0x1,
1207 MTYPE_CC = 0x2,
1208 MTYPE_UC = 0x3,
1209} MTYPE;
1210typedef enum PERFMON_COUNTER_MODE {
1211 PERFMON_COUNTER_MODE_ACCUM = 0x0,
1212 PERFMON_COUNTER_MODE_ACTIVE_CYCLES = 0x1,
1213 PERFMON_COUNTER_MODE_MAX = 0x2,
1214 PERFMON_COUNTER_MODE_DIRTY = 0x3,
1215 PERFMON_COUNTER_MODE_SAMPLE = 0x4,
1216 PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT = 0x5,
1217 PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT = 0x6,
1218 PERFMON_COUNTER_MODE_CYCLES_GE_HI = 0x7,
1219 PERFMON_COUNTER_MODE_CYCLES_EQ_HI = 0x8,
1220 PERFMON_COUNTER_MODE_INACTIVE_CYCLES = 0x9,
1221 PERFMON_COUNTER_MODE_RESERVED = 0xf,
1222} PERFMON_COUNTER_MODE;
1223typedef enum PERFMON_SPM_MODE {
1224 PERFMON_SPM_MODE_OFF = 0x0,
1225 PERFMON_SPM_MODE_16BIT_CLAMP = 0x1,
1226 PERFMON_SPM_MODE_16BIT_NO_CLAMP = 0x2,
1227 PERFMON_SPM_MODE_32BIT_CLAMP = 0x3,
1228 PERFMON_SPM_MODE_32BIT_NO_CLAMP = 0x4,
1229 PERFMON_SPM_MODE_RESERVED_5 = 0x5,
1230 PERFMON_SPM_MODE_RESERVED_6 = 0x6,
1231 PERFMON_SPM_MODE_RESERVED_7 = 0x7,
1232 PERFMON_SPM_MODE_TEST_MODE_0 = 0x8,
1233 PERFMON_SPM_MODE_TEST_MODE_1 = 0x9,
1234 PERFMON_SPM_MODE_TEST_MODE_2 = 0xa,
1235} PERFMON_SPM_MODE;
1236typedef enum SurfaceTiling {
1237 ARRAY_LINEAR = 0x0,
1238 ARRAY_TILED = 0x1,
1239} SurfaceTiling;
1240typedef enum SurfaceArray {
1241 ARRAY_1D = 0x0,
1242 ARRAY_2D = 0x1,
1243 ARRAY_3D = 0x2,
1244 ARRAY_3D_SLICE = 0x3,
1245} SurfaceArray;
1246typedef enum ColorArray {
1247 ARRAY_2D_ALT_COLOR = 0x0,
1248 ARRAY_2D_COLOR = 0x1,
1249 ARRAY_3D_SLICE_COLOR = 0x3,
1250} ColorArray;
1251typedef enum DepthArray {
1252 ARRAY_2D_ALT_DEPTH = 0x0,
1253 ARRAY_2D_DEPTH = 0x1,
1254} DepthArray;
1255typedef enum ENUM_NUM_SIMD_PER_CU {
1256 NUM_SIMD_PER_CU = 0x4,
1257} ENUM_NUM_SIMD_PER_CU;
1258typedef enum MEM_PWR_FORCE_CTRL {
1259 NO_FORCE_REQUEST = 0x0,
1260 FORCE_LIGHT_SLEEP_REQUEST = 0x1,
1261 FORCE_DEEP_SLEEP_REQUEST = 0x2,
1262 FORCE_SHUT_DOWN_REQUEST = 0x3,
1263} MEM_PWR_FORCE_CTRL;
1264typedef enum MEM_PWR_FORCE_CTRL2 {
1265 NO_FORCE_REQ = 0x0,
1266 FORCE_LIGHT_SLEEP_REQ = 0x1,
1267} MEM_PWR_FORCE_CTRL2;
1268typedef enum MEM_PWR_DIS_CTRL {
1269 ENABLE_MEM_PWR_CTRL = 0x0,
1270 DISABLE_MEM_PWR_CTRL = 0x1,
1271} MEM_PWR_DIS_CTRL;
1272typedef enum MEM_PWR_SEL_CTRL {
1273 DYNAMIC_SHUT_DOWN_ENABLE = 0x0,
1274 DYNAMIC_DEEP_SLEEP_ENABLE = 0x1,
1275 DYNAMIC_LIGHT_SLEEP_ENABLE = 0x2,
1276} MEM_PWR_SEL_CTRL;
1277typedef enum MEM_PWR_SEL_CTRL2 {
1278 DYNAMIC_DEEP_SLEEP_EN = 0x0,
1279 DYNAMIC_LIGHT_SLEEP_EN = 0x1,
1280} MEM_PWR_SEL_CTRL2;
1281
1282#endif /* SMU_7_1_3_ENUM_H */
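
ROM_SIGNATURE (0xaa55) above is the standard PC expansion-ROM signature: a valid video BIOS image starts with the bytes 0x55, 0xaa, which read back as 0xaa55 in a little-endian 16-bit load. A short sketch of the usual validity check (the helper name is hypothetical, not from this patch):

#include <stdbool.h>
#include <stdint.h>

static bool vbios_signature_ok(const uint8_t *rom)
{
	/* ROM_SIGNATURE is stored little-endian at offset 0 */
	return rom[0] == 0x55 && rom[1] == 0xaa;
}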
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
new file mode 100644
index 000000000000..1ede9e274714
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
@@ -0,0 +1,6080 @@
1/*
2 * SMU_7_1_3 Register documentation
3 *
4 * Copyright (C) 2014 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
20 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef SMU_7_1_3_SH_MASK_H
25#define SMU_7_1_3_SH_MASK_H
26
27#define GCK_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
28#define GCK_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
29#define GCK_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
30#define GCK_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
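/* Illustrative helpers (not part of this patch): every field in this file
 * comes as a <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT pair, so generic
 * token-pasting accessors in the style the amdgpu driver uses elsewhere
 * can extract and insert any field:
 */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define SET_FIELD(val, reg, field, x) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((x) << reg##__##field##__SHIFT) & reg##__##field##_MASK))
/* e.g. GET_FIELD(v, GCK_SMC_IND_INDEX, SMC_IND_ADDR) */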
31#define GCK_MCLK_FUSES__StartupMClkDid_MASK 0x7f
32#define GCK_MCLK_FUSES__StartupMClkDid__SHIFT 0x0
33#define GCK_MCLK_FUSES__MClkADCA_MASK 0x780
34#define GCK_MCLK_FUSES__MClkADCA__SHIFT 0x7
35#define GCK_MCLK_FUSES__MClkDDCA_MASK 0x1800
36#define GCK_MCLK_FUSES__MClkDDCA__SHIFT 0xb
37#define GCK_MCLK_FUSES__MClkDiDtWait_MASK 0xe000
38#define GCK_MCLK_FUSES__MClkDiDtWait__SHIFT 0xd
39#define GCK_MCLK_FUSES__MClkDiDtFloor_MASK 0x30000
40#define GCK_MCLK_FUSES__MClkDiDtFloor__SHIFT 0x10
41#define CG_DCLK_CNTL__DCLK_DIVIDER_MASK 0x7f
42#define CG_DCLK_CNTL__DCLK_DIVIDER__SHIFT 0x0
43#define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK 0x100
44#define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN__SHIFT 0x8
45#define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG_MASK 0x200
46#define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG__SHIFT 0x9
47#define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
48#define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER__SHIFT 0xa
49#define CG_DCLK_STATUS__DCLK_STATUS_MASK 0x1
50#define CG_DCLK_STATUS__DCLK_STATUS__SHIFT 0x0
51#define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG_MASK 0x2
52#define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG__SHIFT 0x1
53#define CG_VCLK_CNTL__VCLK_DIVIDER_MASK 0x7f
54#define CG_VCLK_CNTL__VCLK_DIVIDER__SHIFT 0x0
55#define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN_MASK 0x100
56#define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN__SHIFT 0x8
57#define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG_MASK 0x200
58#define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG__SHIFT 0x9
59#define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
60#define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER__SHIFT 0xa
61#define CG_VCLK_STATUS__VCLK_STATUS_MASK 0x1
62#define CG_VCLK_STATUS__VCLK_STATUS__SHIFT 0x0
63#define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG_MASK 0x2
64#define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG__SHIFT 0x1
65#define CG_ECLK_CNTL__ECLK_DIVIDER_MASK 0x7f
66#define CG_ECLK_CNTL__ECLK_DIVIDER__SHIFT 0x0
67#define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK 0x100
68#define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN__SHIFT 0x8
69#define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG_MASK 0x200
70#define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG__SHIFT 0x9
71#define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
72#define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER__SHIFT 0xa
73#define CG_ECLK_STATUS__ECLK_STATUS_MASK 0x1
74#define CG_ECLK_STATUS__ECLK_STATUS__SHIFT 0x0
75#define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG_MASK 0x2
76#define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG__SHIFT 0x1
77#define CG_ACLK_CNTL__ACLK_DIVIDER_MASK 0x7f
78#define CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT 0x0
79#define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN_MASK 0x100
80#define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN__SHIFT 0x8
81#define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG_MASK 0x200
82#define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG__SHIFT 0x9
83#define CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
84#define CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER__SHIFT 0xa
85#define CG_MCLK_CNTL__MCLK_DIVIDER_MASK 0x7f
86#define CG_MCLK_CNTL__MCLK_DIVIDER__SHIFT 0x0
87#define CG_MCLK_CNTL__MCLK_DIR_CNTL_EN_MASK 0x100
88#define CG_MCLK_CNTL__MCLK_DIR_CNTL_EN__SHIFT 0x8
89#define CG_MCLK_CNTL__MCLK_DIR_CNTL_TOG_MASK 0x200
90#define CG_MCLK_CNTL__MCLK_DIR_CNTL_TOG__SHIFT 0x9
91#define CG_MCLK_CNTL__MCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
92#define CG_MCLK_CNTL__MCLK_DIR_CNTL_DIVIDER__SHIFT 0xa
93#define CG_MCLK_STATUS__MCLK_STATUS_MASK 0x1
94#define CG_MCLK_STATUS__MCLK_STATUS__SHIFT 0x0
95#define CG_MCLK_STATUS__MCLK_DIR_CNTL_DONETOG_MASK 0x2
96#define CG_MCLK_STATUS__MCLK_DIR_CNTL_DONETOG__SHIFT 0x1
97#define GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK 0x1
98#define GCK_DFS_BYPASS_CNTL__BYPASSECLK__SHIFT 0x0
99#define GCK_DFS_BYPASS_CNTL__BYPASSLCLK_MASK 0x2
100#define GCK_DFS_BYPASS_CNTL__BYPASSLCLK__SHIFT 0x1
101#define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK_MASK 0x4
102#define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK__SHIFT 0x2
103#define GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK 0x8
104#define GCK_DFS_BYPASS_CNTL__BYPASSDCLK__SHIFT 0x3
105#define GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK 0x10
106#define GCK_DFS_BYPASS_CNTL__BYPASSVCLK__SHIFT 0x4
107#define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK_MASK 0x20
108#define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK__SHIFT 0x5
109#define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK_MASK 0x40
110#define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK__SHIFT 0x6
111#define GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK 0x80
112#define GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT 0x7
113#define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK_MASK 0x100
114#define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK__SHIFT 0x8
115#define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK_MASK 0x200
116#define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK__SHIFT 0x9
117#define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK_MASK 0x400
118#define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK__SHIFT 0xa
119#define GCK_DFS_BYPASS_CNTL__BYPASSSCLK_MASK 0x800
120#define GCK_DFS_BYPASS_CNTL__BYPASSSCLK__SHIFT 0xb
121#define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN_MASK 0x1000
122#define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN__SHIFT 0xc
123#define GCK_DFS_BYPASS_CNTL__BYPASSMCLK_MASK 0x2000
124#define GCK_DFS_BYPASS_CNTL__BYPASSMCLK__SHIFT 0xd
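/* Single-bit fields such as these compose directly by OR-ing their masks;
 * e.g. a (hypothetical) request to bypass both the ECLK and DCLK DFS:
 *	data |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK |
 *		GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK;
 */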
#define CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK 0x1
#define CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT 0x0
#define CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK 0x2
#define CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT 0x1
#define CG_SPLL_FUNC_CNTL__SPLL_DIVEN_MASK 0x4
#define CG_SPLL_FUNC_CNTL__SPLL_DIVEN__SHIFT 0x2
#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK 0x8
#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT 0x3
#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS_MASK 0x10
#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS__SHIFT 0x4
#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x7e0
#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x5
#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE_MASK 0x800
#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE__SHIFT 0xb
#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN_MASK 0x1000
#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN__SHIFT 0xc
#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK 0x7f00000
#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT 0x14
#define CG_SPLL_FUNC_CNTL__SPLL_DIVA_ACK_MASK 0x8000000
#define CG_SPLL_FUNC_CNTL__SPLL_DIVA_ACK__SHIFT 0x1b
#define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN_MASK 0x10000000
#define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN__SHIFT 0x1c
#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK 0x1ff
#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT 0x0
#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_MASK 0x800
#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ__SHIFT 0xb
#define CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK 0x400000
#define CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT 0x16
#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK 0x800000
#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT 0x17
#define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG_MASK 0x1000000
#define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG__SHIFT 0x18
#define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG_MASK 0x2000000
#define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK 0x4000000
#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE__SHIFT 0x1a
#define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR_MASK 0x8000000
#define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR__SHIFT 0x1b
#define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE_MASK 0x10000000
#define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE__SHIFT 0x1c
#define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR_MASK 0x40000000
#define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR__SHIFT 0x1e
#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x3ffffff
#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x0
#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK 0x10000000
#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN__SHIFT 0x1c
#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL_MASK 0xf
#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL__SHIFT 0x0
#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL_MASK 0x60
#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL__SHIFT 0x5
#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN_MASK 0x180
#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN__SHIFT 0x7
#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK 0xe00
#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE__SHIFT 0x9
#define CG_SPLL_FUNC_CNTL_4__PCC_INC_DIV_MASK 0x7f000
#define CG_SPLL_FUNC_CNTL_4__PCC_INC_DIV__SHIFT 0xc
#define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS_MASK 0x200000
#define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS__SHIFT 0x15
#define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK_MASK 0x800000
#define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK__SHIFT 0x17
#define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL_MASK 0x1000000
#define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL__SHIFT 0x18
#define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN_MASK 0x2000000
#define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_MASK 0xc000000
#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT__SHIFT 0x1a
#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT_MASK 0x70000000
#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT__SHIFT 0x1c
#define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL_MASK 0x80000000
#define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL__SHIFT 0x1f
#define CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS_MASK 0x1
#define CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS__SHIFT 0x0
#define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN_MASK 0x2
#define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN__SHIFT 0x1
#define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL_MASK 0xc
#define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL__SHIFT 0x2
#define CG_SPLL_FUNC_CNTL_5__RESET_TIMER_MASK 0x30
#define CG_SPLL_FUNC_CNTL_5__RESET_TIMER__SHIFT 0x4
#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL_MASK 0xc0
#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL__SHIFT 0x6
#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN_MASK 0x100
#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN__SHIFT 0x8
#define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX_MASK 0x200
#define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX__SHIFT 0x9
#define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT_MASK 0xff
#define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT__SHIFT 0x0
#define CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT_MASK 0xff00
#define CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT__SHIFT 0x8
#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN_MASK 0x10000
#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN__SHIFT 0x10
#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN_MASK 0x1e0000
#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN__SHIFT 0x11
#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT_MASK 0x1e00000
#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT__SHIFT 0x15
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR_MASK 0xfe000000
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_TEST_MASK 0x4
#define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x2
#define SPLL_CNTL_MODE__SPLL_FASTEN_MASK 0x8
#define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x3
#define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x10
#define SPLL_CNTL_MODE__SPLL_ENSAT__SHIFT 0x4
#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV_MASK 0xc00
#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV__SHIFT 0xa
#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0xff000
#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0xc
#define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000
#define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x1c
#define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000
#define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x1d
#define CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK 0x1
#define CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT 0x0
#define CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK 0xfff0
#define CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT 0x4
#define CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK 0x3ffffff
#define CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT 0x0
#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK 0xff00
#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT 0x8
#define CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK 0x2
#define CG_CLKPIN_CNTL__XTALIN_DIVIDE__SHIFT 0x1
#define CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK 0x4
#define CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT 0x2
#define CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK 0x1
#define CG_CLKPIN_CNTL_2__ENABLE_XCLK__SHIFT 0x0
#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK 0x8
#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT 0x3
#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK 0x100
#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK__SHIFT 0x8
#define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN_MASK 0x4000
#define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN__SHIFT 0xe
#define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE_MASK 0x8000
#define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE__SHIFT 0xf
#define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN_MASK 0x10000
#define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN__SHIFT 0x10
#define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE_MASK 0x20000
#define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE__SHIFT 0x11
#define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN_MASK 0x40000
#define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN__SHIFT 0x12
#define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE_MASK 0x80000
#define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE__SHIFT 0x13
#define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN_MASK 0x100000
#define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN__SHIFT 0x14
#define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE_MASK 0x200000
#define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE__SHIFT 0x15
#define CG_CLKPIN_CNTL_2__CML_CTRL_MASK 0xc00000
#define CG_CLKPIN_CNTL_2__CML_CTRL__SHIFT 0x16
#define CG_CLKPIN_CNTL_2__CLK_SPARE_MASK 0xff000000
#define CG_CLKPIN_CNTL_2__CLK_SPARE__SHIFT 0x18
#define CG_CLKPIN_CNTL_DC__OSC_EN_MASK 0x1
#define CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT 0x0
#define CG_CLKPIN_CNTL_DC__XTL_LOW_GAIN_MASK 0x6
#define CG_CLKPIN_CNTL_DC__XTL_LOW_GAIN__SHIFT 0x1
#define CG_CLKPIN_CNTL_DC__XTL_XOCLK_DRV_R_EN_MASK 0x200
#define CG_CLKPIN_CNTL_DC__XTL_XOCLK_DRV_R_EN__SHIFT 0x9
#define CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK 0x1c00
#define CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT 0xa
#define THM_CLK_CNTL__CMON_CLK_SEL_MASK 0xff
#define THM_CLK_CNTL__CMON_CLK_SEL__SHIFT 0x0
#define THM_CLK_CNTL__TMON_CLK_SEL_MASK 0xff00
#define THM_CLK_CNTL__TMON_CLK_SEL__SHIFT 0x8
#define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN_MASK 0x10000
#define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN__SHIFT 0x10
#define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK 0xff
#define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT 0x0
#define MISC_CLK_CTRL__ZCLK_SEL_MASK 0xff00
#define MISC_CLK_CTRL__ZCLK_SEL__SHIFT 0x8
#define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK 0xff0000
#define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT 0x10
#define GCK_PLL_TEST_CNTL__TST_SRC_SEL_MASK 0x1f
#define GCK_PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0
#define GCK_PLL_TEST_CNTL__TST_REF_SEL_MASK 0x3e0
#define GCK_PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x5
#define GCK_PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x1fc00
#define GCK_PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0xa
#define GCK_PLL_TEST_CNTL__TST_RESET_MASK 0x20000
#define GCK_PLL_TEST_CNTL__TST_RESET__SHIFT 0x11
#define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE_MASK 0x40000
#define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE__SHIFT 0x12
#define GCK_PLL_TEST_CNTL_2__TEST_COUNT_MASK 0xfffe0000
#define GCK_PLL_TEST_CNTL_2__TEST_COUNT__SHIFT 0x11
#define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL_MASK 0x7
#define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL__SHIFT 0x0
#define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL_MASK 0x38
#define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL__SHIFT 0x3
#define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL_MASK 0x1c0
#define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL__SHIFT 0x6
#define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL_MASK 0xe00
#define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL__SHIFT 0x9
#define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL_MASK 0x7000
#define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL__SHIFT 0xc
#define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL_MASK 0x38000
#define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL__SHIFT 0xf
#define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL_MASK 0x1c0000
#define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL__SHIFT 0x12
#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL_MASK 0xe00000
#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL__SHIFT 0x15
#define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL_MASK 0x7000000
#define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL__SHIFT 0x18
#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL_MASK 0x38000000
#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL__SHIFT 0x1b
#define SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
#define SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
#define SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
#define SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
#define SMC_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffff
#define SMC_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x0
#define SMC_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffff
#define SMC_IND_DATA_0__SMC_IND_DATA__SHIFT 0x0
#define SMC_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffff
#define SMC_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x0
#define SMC_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffff
#define SMC_IND_DATA_1__SMC_IND_DATA__SHIFT 0x0
#define SMC_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffff
#define SMC_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x0
#define SMC_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffff
#define SMC_IND_DATA_2__SMC_IND_DATA__SHIFT 0x0
#define SMC_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffff
#define SMC_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x0
#define SMC_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffff
#define SMC_IND_DATA_3__SMC_IND_DATA__SHIFT 0x0
#define SMC_IND_INDEX_4__SMC_IND_ADDR_MASK 0xffffffff
#define SMC_IND_INDEX_4__SMC_IND_ADDR__SHIFT 0x0
#define SMC_IND_DATA_4__SMC_IND_DATA_MASK 0xffffffff
#define SMC_IND_DATA_4__SMC_IND_DATA__SHIFT 0x0
#define SMC_IND_INDEX_5__SMC_IND_ADDR_MASK 0xffffffff
#define SMC_IND_INDEX_5__SMC_IND_ADDR__SHIFT 0x0
#define SMC_IND_DATA_5__SMC_IND_DATA_MASK 0xffffffff
#define SMC_IND_DATA_5__SMC_IND_DATA__SHIFT 0x0
#define SMC_IND_INDEX_6__SMC_IND_ADDR_MASK 0xffffffff
#define SMC_IND_INDEX_6__SMC_IND_ADDR__SHIFT 0x0
#define SMC_IND_DATA_6__SMC_IND_DATA_MASK 0xffffffff
#define SMC_IND_DATA_6__SMC_IND_DATA__SHIFT 0x0
#define SMC_IND_INDEX_7__SMC_IND_ADDR_MASK 0xffffffff
#define SMC_IND_INDEX_7__SMC_IND_ADDR__SHIFT 0x0
#define SMC_IND_DATA_7__SMC_IND_DATA_MASK 0xffffffff
#define SMC_IND_DATA_7__SMC_IND_DATA__SHIFT 0x0
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x1
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x0
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x2
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1__SHIFT 0x1
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2_MASK 0x4
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2__SHIFT 0x2
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3_MASK 0x8
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3__SHIFT 0x3
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4_MASK 0x10
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4__SHIFT 0x4
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5_MASK 0x20
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5__SHIFT 0x5
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6_MASK 0x40
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6__SHIFT 0x6
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7_MASK 0x80
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7__SHIFT 0x7
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_8_MASK 0x100
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_8__SHIFT 0x8
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_9_MASK 0x200
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_9__SHIFT 0x9
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_10_MASK 0x400
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_10__SHIFT 0xa
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_11_MASK 0x800
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_11__SHIFT 0xb
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_12_MASK 0x1000
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_12__SHIFT 0xc
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_13_MASK 0x2000
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_13__SHIFT 0xd
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_14_MASK 0x4000
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_14__SHIFT 0xe
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_15_MASK 0x8000
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_15__SHIFT 0xf
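/*
 * Annotation (hedged): the SMC_IND_INDEX_n/SMC_IND_DATA_n pairs above form an
 * index/data window into SMC address space, and SMC_IND_ACCESS_CNTL can turn
 * on per-pair auto-increment of the index after each data access. A sketch of
 * an indirect read under those assumptions follows; the mm* register offsets
 * and the rreg32()/wreg32() accessors are stand-ins (the offsets live in the
 * companion offset header, not in this file).
 */
static inline u32 smc_ind_read(u32 smc_addr)
{
	/* Point the window at the target SMC address... */
	wreg32(mmSMC_IND_INDEX_0, smc_addr);
	/* ...then pull the value out through the data register. */
	return rreg32(mmSMC_IND_DATA_0);
}
/*
 * With SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0 set, back-to-back reads of
 * mmSMC_IND_DATA_0 would walk consecutive SMC addresses without rewriting
 * the index each time.
 */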
#define SMC_MESSAGE_0__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_0__SMC_MSG__SHIFT 0x0
#define SMC_RESP_0__SMC_RESP_MASK 0xffff
#define SMC_RESP_0__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_1__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_1__SMC_MSG__SHIFT 0x0
#define SMC_RESP_1__SMC_RESP_MASK 0xffff
#define SMC_RESP_1__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_2__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_2__SMC_MSG__SHIFT 0x0
#define SMC_RESP_2__SMC_RESP_MASK 0xffff
#define SMC_RESP_2__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_3__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_3__SMC_MSG__SHIFT 0x0
#define SMC_RESP_3__SMC_RESP_MASK 0xffff
#define SMC_RESP_3__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_4__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_4__SMC_MSG__SHIFT 0x0
#define SMC_RESP_4__SMC_RESP_MASK 0xffff
#define SMC_RESP_4__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_5__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_5__SMC_MSG__SHIFT 0x0
#define SMC_RESP_5__SMC_RESP_MASK 0xffff
#define SMC_RESP_5__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_6__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_6__SMC_MSG__SHIFT 0x0
#define SMC_RESP_6__SMC_RESP_MASK 0xffff
#define SMC_RESP_6__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_7__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_7__SMC_MSG__SHIFT 0x0
#define SMC_RESP_7__SMC_RESP_MASK 0xffff
#define SMC_RESP_7__SMC_RESP__SHIFT 0x0
#define SMC_MSG_ARG_0__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_0__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_1__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_1__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_2__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_2__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_3__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_3__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_4__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_4__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_5__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_5__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_6__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_6__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_7__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_7__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MESSAGE_8__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_8__SMC_MSG__SHIFT 0x0
#define SMC_RESP_8__SMC_RESP_MASK 0xffff
#define SMC_RESP_8__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_9__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_9__SMC_MSG__SHIFT 0x0
#define SMC_RESP_9__SMC_RESP_MASK 0xffff
#define SMC_RESP_9__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_10__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_10__SMC_MSG__SHIFT 0x0
#define SMC_RESP_10__SMC_RESP_MASK 0xffff
#define SMC_RESP_10__SMC_RESP__SHIFT 0x0
#define SMC_MESSAGE_11__SMC_MSG_MASK 0xffff
#define SMC_MESSAGE_11__SMC_MSG__SHIFT 0x0
#define SMC_RESP_11__SMC_RESP_MASK 0xffff
#define SMC_RESP_11__SMC_RESP__SHIFT 0x0
#define SMC_MSG_ARG_8__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_8__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_9__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_9__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_10__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_10__SMC_MSG_ARG__SHIFT 0x0
#define SMC_MSG_ARG_11__SMC_MSG_ARG_MASK 0xffffffff
#define SMC_MSG_ARG_11__SMC_MSG_ARG__SHIFT 0x0
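/*
 * Annotation (hedged): the SMC_MESSAGE_n/SMC_RESP_n/SMC_MSG_ARG_n fields
 * above describe a mailbox: software writes an argument and a 16-bit message
 * ID, then polls the response field until the firmware posts a result. A
 * minimal sketch of that handshake follows; the message semantics, the mm*
 * offsets and the rreg32()/wreg32() accessors are illustrative assumptions,
 * and a real driver would bound the poll with a timeout.
 */
static inline u32 smu_send_msg(u16 msg, u32 arg)
{
	u32 resp;

	wreg32(mmSMC_MSG_ARG_0, arg);
	wreg32(mmSMC_MESSAGE_0, msg);	/* only SMC_MSG (bits 15:0) is used */

	/* Busy-wait until the firmware posts a non-zero response code. */
	do {
		resp = rreg32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK;
	} while (resp == 0);

	return resp;
}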
#define SMC_SYSCON_RESET_CNTL__rst_reg_MASK 0x1
#define SMC_SYSCON_RESET_CNTL__rst_reg__SHIFT 0x0
#define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override_MASK 0x2
#define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override__SHIFT 0x1
#define SMC_SYSCON_RESET_CNTL__RegReset_MASK 0x40000000
#define SMC_SYSCON_RESET_CNTL__RegReset__SHIFT 0x1e
#define SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK 0x1
#define SMC_SYSCON_CLOCK_CNTL_0__ck_disable__SHIFT 0x0
#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en_MASK 0x2
#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en__SHIFT 0x1
#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout_MASK 0xffff00
#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout__SHIFT 0x8
#define SMC_SYSCON_CLOCK_CNTL_0__cken_MASK 0x1000000
#define SMC_SYSCON_CLOCK_CNTL_0__cken__SHIFT 0x18
#define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable_MASK 0x1
#define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable__SHIFT 0x0
#define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq_MASK 0xffffffff
#define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq__SHIFT 0x0
#define SMC_SYSCON_MISC_CNTL__dma_no_outstanding_MASK 0x2
#define SMC_SYSCON_MISC_CNTL__dma_no_outstanding__SHIFT 0x1
#define SMC_SYSCON_MSG_ARG_0__smc_msg_arg_MASK 0xffffffff
#define SMC_SYSCON_MSG_ARG_0__smc_msg_arg__SHIFT 0x0
#define SMC_PC_C__smc_pc_c_MASK 0xffffffff
#define SMC_PC_C__smc_pc_c__SHIFT 0x0
#define SMC_SCRATCH9__SCRATCH_VALUE_MASK 0xffffffff
#define SMC_SCRATCH9__SCRATCH_VALUE__SHIFT 0x0
#define GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x1
#define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x0
#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN_MASK 0xf
#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN__SHIFT 0x0
#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP_MASK 0xf0
#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP__SHIFT 0x4
#define GPIOPAD_MASK__GPIO_MASK_MASK 0x7fffffff
#define GPIOPAD_MASK__GPIO_MASK__SHIFT 0x0
#define GPIOPAD_A__GPIO_A_MASK 0x7fffffff
#define GPIOPAD_A__GPIO_A__SHIFT 0x0
#define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffff
#define GPIOPAD_EN__GPIO_EN__SHIFT 0x0
#define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffff
#define GPIOPAD_Y__GPIO_Y__SHIFT 0x0
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x1
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x0
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x2
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x1
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x4
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x2
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x8
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x3
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x10
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x4
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x20
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x5
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x40
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x6
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x80
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x7
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x100
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x8
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x200
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x9
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x400
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0xa
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x800
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0xb
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x1000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0xc
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x2000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 0xd
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x4000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0xe
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x8000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0xf
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x10000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x10
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x20000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x11
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x40000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x12
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x80000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x13
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x100000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x14
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x200000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x15
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x400000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x16
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x800000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x17
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x1000000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x18
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x2000000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x19
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x4000000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x1a
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x8000000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x1b
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x1c
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x1d
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x1e
#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1fffffff
#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x0
#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000
#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x1f
#define GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1fffffff
#define GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x0
#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000
#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x1f
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x1
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x0
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x2
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x1
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x4
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x2
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x8
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x3
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x10
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x4
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x20
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x5
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x40
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x6
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x80
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x7
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x100
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x8
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x200
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x9
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x400
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0xa
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x800
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0xb
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x1000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0xc
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x2000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0xd
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x4000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0xe
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x8000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0xf
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x10000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x10
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x20000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x11
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x40000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x12
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x80000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x13
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x100000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x14
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x200000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x15
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x400000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x16
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x800000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x17
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x1000000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x18
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x2000000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x19
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x4000000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x1a
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x8000000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x1b
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x1c
#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000
#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x1f
#define GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1fffffff
#define GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x0
#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000
#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x1f
#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1fffffff
#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x0
#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000
#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x1f
#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1fffffff
#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x0
#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000
#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x1f
#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL_MASK 0x1f
#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL__SHIFT 0x0
#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR_MASK 0x20
#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR__SHIFT 0x5
#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ_MASK 0x40
#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ__SHIFT 0x6
#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL_MASK 0x7fffffff
#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL__SHIFT 0x0
#define GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7fffffff
#define GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x0
#define GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7fffffff
#define GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x0
#define CG_FPS_CNT__FPS_CNT_MASK 0xffffffff
#define CG_FPS_CNT__FPS_CNT__SHIFT 0x0
#define SMU_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffff
#define SMU_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x0
#define SMU_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffff
#define SMU_IND_DATA_0__SMC_IND_DATA__SHIFT 0x0
#define SMU_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffff
#define SMU_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x0
#define SMU_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffff
#define SMU_IND_DATA_1__SMC_IND_DATA__SHIFT 0x0
#define SMU_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffff
#define SMU_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x0
#define SMU_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffff
#define SMU_IND_DATA_2__SMC_IND_DATA__SHIFT 0x0
#define SMU_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffff
#define SMU_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x0
#define SMU_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffff
#define SMU_IND_DATA_3__SMC_IND_DATA__SHIFT 0x0
#define SMU_IND_INDEX_4__SMC_IND_ADDR_MASK 0xffffffff
#define SMU_IND_INDEX_4__SMC_IND_ADDR__SHIFT 0x0
#define SMU_IND_DATA_4__SMC_IND_DATA_MASK 0xffffffff
#define SMU_IND_DATA_4__SMC_IND_DATA__SHIFT 0x0
#define SMU_IND_INDEX_5__SMC_IND_ADDR_MASK 0xffffffff
#define SMU_IND_INDEX_5__SMC_IND_ADDR__SHIFT 0x0
#define SMU_IND_DATA_5__SMC_IND_DATA_MASK 0xffffffff
#define SMU_IND_DATA_5__SMC_IND_DATA__SHIFT 0x0
#define SMU_IND_INDEX_6__SMC_IND_ADDR_MASK 0xffffffff
#define SMU_IND_INDEX_6__SMC_IND_ADDR__SHIFT 0x0
#define SMU_IND_DATA_6__SMC_IND_DATA_MASK 0xffffffff
#define SMU_IND_DATA_6__SMC_IND_DATA__SHIFT 0x0
#define SMU_IND_INDEX_7__SMC_IND_ADDR_MASK 0xffffffff
#define SMU_IND_INDEX_7__SMC_IND_ADDR__SHIFT 0x0
#define SMU_IND_DATA_7__SMC_IND_DATA_MASK 0xffffffff
#define SMU_IND_DATA_7__SMC_IND_DATA__SHIFT 0x0
#define SMU_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
#define SMU_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
#define SMU_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
#define SMU_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
#define RCU_UC_EVENTS__RCU_TST_jpc_rep_req_MASK 0x1
#define RCU_UC_EVENTS__RCU_TST_jpc_rep_req__SHIFT 0x0
#define RCU_UC_EVENTS__TST_RCU_jpc_rep_done_MASK 0x2
#define RCU_UC_EVENTS__TST_RCU_jpc_rep_done__SHIFT 0x1
#define RCU_UC_EVENTS__drv_rst_mode_MASK 0x4
#define RCU_UC_EVENTS__drv_rst_mode__SHIFT 0x2
#define RCU_UC_EVENTS__SMU_DC_efuse_status_invalid_MASK 0x8
#define RCU_UC_EVENTS__SMU_DC_efuse_status_invalid__SHIFT 0x3
#define RCU_UC_EVENTS__TP_Tester_MASK 0x40
#define RCU_UC_EVENTS__TP_Tester__SHIFT 0x6
#define RCU_UC_EVENTS__boot_seq_done_MASK 0x80
#define RCU_UC_EVENTS__boot_seq_done__SHIFT 0x7
#define RCU_UC_EVENTS__sclk_deep_sleep_exit_MASK 0x100
#define RCU_UC_EVENTS__sclk_deep_sleep_exit__SHIFT 0x8
#define RCU_UC_EVENTS__BREAK_PT1_ACTIVE_MASK 0x200
#define RCU_UC_EVENTS__BREAK_PT1_ACTIVE__SHIFT 0x9
#define RCU_UC_EVENTS__BREAK_PT2_ACTIVE_MASK 0x400
#define RCU_UC_EVENTS__BREAK_PT2_ACTIVE__SHIFT 0xa
#define RCU_UC_EVENTS__FCH_HALT_MASK 0x800
#define RCU_UC_EVENTS__FCH_HALT__SHIFT 0xb
#define RCU_UC_EVENTS__RCU_GIO_fch_lockdown_MASK 0x2000
#define RCU_UC_EVENTS__RCU_GIO_fch_lockdown__SHIFT 0xd
#define RCU_UC_EVENTS__INTERRUPTS_ENABLED_MASK 0x10000
#define RCU_UC_EVENTS__INTERRUPTS_ENABLED__SHIFT 0x10
#define RCU_UC_EVENTS__RCU_DtmCnt0_Done_MASK 0x20000
#define RCU_UC_EVENTS__RCU_DtmCnt0_Done__SHIFT 0x11
#define RCU_UC_EVENTS__RCU_DtmCnt1_Done_MASK 0x40000
#define RCU_UC_EVENTS__RCU_DtmCnt1_Done__SHIFT 0x12
#define RCU_UC_EVENTS__RCU_DtmCnt2_Done_MASK 0x80000
#define RCU_UC_EVENTS__RCU_DtmCnt2_Done__SHIFT 0x13
#define RCU_UC_EVENTS__irq31_sel_MASK 0x3000000
#define RCU_UC_EVENTS__irq31_sel__SHIFT 0x18
#define RCU_MISC_CTRL__REG_DRV_RST_MODE_MASK 0x2
#define RCU_MISC_CTRL__REG_DRV_RST_MODE__SHIFT 0x1
#define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS_MASK 0x8
#define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS__SHIFT 0x3
#define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE_MASK 0x10
#define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE__SHIFT 0x4
#define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE_MASK 0x20
#define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE__SHIFT 0x5
#define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE_MASK 0x100
#define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE__SHIFT 0x8
#define RCU_MISC_CTRL__BREAK_PT1_DONE_MASK 0x10000
#define RCU_MISC_CTRL__BREAK_PT1_DONE__SHIFT 0x10
#define RCU_MISC_CTRL__BREAK_PT2_DONE_MASK 0x20000
#define RCU_MISC_CTRL__BREAK_PT2_DONE__SHIFT 0x11
#define RCU_MISC_CTRL__SAMU_START_MASK 0x400000
#define RCU_MISC_CTRL__SAMU_START__SHIFT 0x16
#define RCU_MISC_CTRL__RST_PULSE_WIDTH_MASK 0xff800000
#define RCU_MISC_CTRL__RST_PULSE_WIDTH__SHIFT 0x17
#define RCU_VIRT_RESET_REQ__VF_MASK 0xffff
#define RCU_VIRT_RESET_REQ__VF__SHIFT 0x0
#define RCU_VIRT_RESET_REQ__PF_MASK 0x80000000
#define RCU_VIRT_RESET_REQ__PF__SHIFT 0x1f
#define CC_RCU_FUSES__GPU_DIS_MASK 0x2
#define CC_RCU_FUSES__GPU_DIS__SHIFT 0x1
#define CC_RCU_FUSES__DEBUG_DISABLE_MASK 0x4
#define CC_RCU_FUSES__DEBUG_DISABLE__SHIFT 0x2
#define CC_RCU_FUSES__EFUSE_RD_DISABLE_MASK 0x10
#define CC_RCU_FUSES__EFUSE_RD_DISABLE__SHIFT 0x4
#define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS_MASK 0x20
#define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS__SHIFT 0x5
#define CC_RCU_FUSES__DRV_RST_MODE_MASK 0x40
#define CC_RCU_FUSES__DRV_RST_MODE__SHIFT 0x6
#define CC_RCU_FUSES__ROM_DIS_MASK 0x80
#define CC_RCU_FUSES__ROM_DIS__SHIFT 0x7
#define CC_RCU_FUSES__JPC_REP_DISABLE_MASK 0x100
#define CC_RCU_FUSES__JPC_REP_DISABLE__SHIFT 0x8
#define CC_RCU_FUSES__RCU_BREAK_POINT1_MASK 0x200
#define CC_RCU_FUSES__RCU_BREAK_POINT1__SHIFT 0x9
#define CC_RCU_FUSES__RCU_BREAK_POINT2_MASK 0x400
#define CC_RCU_FUSES__RCU_BREAK_POINT2__SHIFT 0xa
#define CC_RCU_FUSES__SMU_IOC_MST_DISABLE_MASK 0x4000
#define CC_RCU_FUSES__SMU_IOC_MST_DISABLE__SHIFT 0xe
#define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE_MASK 0x8000
#define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE__SHIFT 0xf
#define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE_MASK 0x10000
#define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE__SHIFT 0x10
#define CC_RCU_FUSES__XFIRE_DISABLE_MASK 0x20000
#define CC_RCU_FUSES__XFIRE_DISABLE__SHIFT 0x11
#define CC_RCU_FUSES__SAMU_FUSE_DISABLE_MASK 0x40000
#define CC_RCU_FUSES__SAMU_FUSE_DISABLE__SHIFT 0x12
#define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE_MASK 0x80000
#define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE__SHIFT 0x13
#define CC_RCU_FUSES__MEM_HARDREP_EN_MASK 0x200000
#define CC_RCU_FUSES__MEM_HARDREP_EN__SHIFT 0x15
#define CC_RCU_FUSES__PCIE_INIT_DISABLE_MASK 0x400000
#define CC_RCU_FUSES__PCIE_INIT_DISABLE__SHIFT 0x16
#define CC_RCU_FUSES__DSMU_DISABLE_MASK 0x800000
#define CC_RCU_FUSES__DSMU_DISABLE__SHIFT 0x17
#define CC_RCU_FUSES__WRP_FUSE_VALID_MASK 0x1000000
#define CC_RCU_FUSES__WRP_FUSE_VALID__SHIFT 0x18
#define CC_RCU_FUSES__PHY_FUSE_VALID_MASK 0x2000000
#define CC_RCU_FUSES__PHY_FUSE_VALID__SHIFT 0x19
#define CC_RCU_FUSES__RCU_SPARE_MASK 0xfc000000
#define CC_RCU_FUSES__RCU_SPARE__SHIFT 0x1a
#define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE_MASK 0x2
#define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE__SHIFT 0x1
#define CC_SMU_MISC_FUSES__MinSClkDid_MASK 0x1fc
#define CC_SMU_MISC_FUSES__MinSClkDid__SHIFT 0x2
#define CC_SMU_MISC_FUSES__MISC_SPARE_MASK 0x600
#define CC_SMU_MISC_FUSES__MISC_SPARE__SHIFT 0x9
#define CC_SMU_MISC_FUSES__PostResetGnbClkDid_MASK 0x3f800
#define CC_SMU_MISC_FUSES__PostResetGnbClkDid__SHIFT 0xb
#define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half_MASK 0x40000
#define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half__SHIFT 0x12
#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half_MASK 0x80000
#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half__SHIFT 0x13
#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half_MASK 0x100000
#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half__SHIFT 0x14
#define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half_MASK 0x200000
#define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half__SHIFT 0x15
#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis_MASK 0x400000
#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis__SHIFT 0x16
#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis_MASK 0x800000
#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis__SHIFT 0x17
#define CC_SMU_MISC_FUSES__VCE_DISABLE_MASK 0x8000000
#define CC_SMU_MISC_FUSES__VCE_DISABLE__SHIFT 0x1b
#define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE_MASK 0x10000000
#define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE__SHIFT 0x1c
#define CC_SMU_MISC_FUSES__GNB_SPARE_MASK 0x60000000
#define CC_SMU_MISC_FUSES__GNB_SPARE__SHIFT 0x1d
#define CC_SCLK_VID_FUSES__SClkVid0_MASK 0xff
#define CC_SCLK_VID_FUSES__SClkVid0__SHIFT 0x0
#define CC_SCLK_VID_FUSES__SClkVid1_MASK 0xff00
#define CC_SCLK_VID_FUSES__SClkVid1__SHIFT 0x8
#define CC_SCLK_VID_FUSES__SClkVid2_MASK 0xff0000
#define CC_SCLK_VID_FUSES__SClkVid2__SHIFT 0x10
#define CC_SCLK_VID_FUSES__SClkVid3_MASK 0xff000000
#define CC_SCLK_VID_FUSES__SClkVid3__SHIFT 0x18
#define CC_GIO_IOCCFG_FUSES__NB_REV_ID_MASK 0x7fe
#define CC_GIO_IOCCFG_FUSES__NB_REV_ID__SHIFT 0x1
#define CC_GIO_IOC_FUSES__IOC_FUSES_MASK 0x3e
#define CC_GIO_IOC_FUSES__IOC_FUSES__SHIFT 0x1
#define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2_MASK 0x3e
#define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2__SHIFT 0x1
#define CC_SMU_TST_EFUSE1_MISC__RME_MASK 0x40
#define CC_SMU_TST_EFUSE1_MISC__RME__SHIFT 0x6
#define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE_MASK 0x80
#define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE__SHIFT 0x7
#define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE_MASK 0x100
#define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE__SHIFT 0x8
#define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE_MASK 0x200
#define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE__SHIFT 0x9
#define CC_SMU_TST_EFUSE1_MISC__GPU_DIS_MASK 0x400
#define CC_SMU_TST_EFUSE1_MISC__GPU_DIS__SHIFT 0xa
#define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE_MASK 0x800
#define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE__SHIFT 0xb
#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA_MASK 0x1000
#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA__SHIFT 0xc
#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB_MASK 0x2000
#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB__SHIFT 0xd
#define CC_SMU_TST_EFUSE1_MISC__RM_RF8_MASK 0x4000
#define CC_SMU_TST_EFUSE1_MISC__RM_RF8__SHIFT 0xe
#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1_MASK 0x400000
#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1__SHIFT 0x16
#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2_MASK 0x800000
#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2__SHIFT 0x17
#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3_MASK 0x1000000
#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3__SHIFT 0x18
#define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE_MASK 0x2000000
#define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE__SHIFT 0x19
#define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE_MASK 0x4000000
#define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE__SHIFT 0x1a
#define CC_TST_ID_STRAPS__DEVICE_ID_MASK 0xffff0
#define CC_TST_ID_STRAPS__DEVICE_ID__SHIFT 0x4
#define CC_TST_ID_STRAPS__MAJOR_REV_ID_MASK 0xf00000
#define CC_TST_ID_STRAPS__MAJOR_REV_ID__SHIFT 0x14
#define CC_TST_ID_STRAPS__MINOR_REV_ID_MASK 0xf000000
#define CC_TST_ID_STRAPS__MINOR_REV_ID__SHIFT 0x18
#define CC_TST_ID_STRAPS__ATI_REV_ID_MASK 0xf0000000
#define CC_TST_ID_STRAPS__ATI_REV_ID__SHIFT 0x1c
#define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT_MASK 0x2
#define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT__SHIFT 0x1
#define CC_HARVEST_FUSES__VCE_DISABLE_MASK 0x6
#define CC_HARVEST_FUSES__VCE_DISABLE__SHIFT 0x1
#define CC_HARVEST_FUSES__UVD_DISABLE_MASK 0x10
#define CC_HARVEST_FUSES__UVD_DISABLE__SHIFT 0x4
#define CC_HARVEST_FUSES__ACP_DISABLE_MASK 0x40
#define CC_HARVEST_FUSES__ACP_DISABLE__SHIFT 0x6
#define CC_HARVEST_FUSES__DC_DISABLE_MASK 0x3f00
#define CC_HARVEST_FUSES__DC_DISABLE__SHIFT 0x8
#define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ_MASK 0xffffffff
#define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ__SHIFT 0x0
#define SMU_STATUS__SMU_DONE_MASK 0x1
#define SMU_STATUS__SMU_DONE__SHIFT 0x0
#define SMU_STATUS__SMU_PASS_MASK 0x2
#define SMU_STATUS__SMU_PASS__SHIFT 0x1
#define SMU_FIRMWARE__SMU_IN_PROG_MASK 0x1
#define SMU_FIRMWARE__SMU_IN_PROG__SHIFT 0x0
#define SMU_FIRMWARE__SMU_RD_DONE_MASK 0x6
#define SMU_FIRMWARE__SMU_RD_DONE__SHIFT 0x1
#define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN_MASK 0x8
#define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN__SHIFT 0x3
#define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN_MASK 0x10
#define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN__SHIFT 0x4
#define SMU_FIRMWARE__SMU_counter_MASK 0xf00
#define SMU_FIRMWARE__SMU_counter__SHIFT 0x8
#define SMU_FIRMWARE__SMU_MODE_MASK 0x10000
#define SMU_FIRMWARE__SMU_MODE__SHIFT 0x10
#define SMU_FIRMWARE__SMU_SEL_MASK 0x20000
#define SMU_FIRMWARE__SMU_SEL__SHIFT 0x11
#define SMU_INPUT_DATA__START_ADDR_MASK 0x7fffffff
#define SMU_INPUT_DATA__START_ADDR__SHIFT 0x0
#define SMU_INPUT_DATA__AUTO_START_MASK 0x80000000
#define SMU_INPUT_DATA__AUTO_START__SHIFT 0x1f
#define SMU_EFUSE_0__EFUSE_DATA_MASK 0xffffffff
#define SMU_EFUSE_0__EFUSE_DATA__SHIFT 0x0
#define FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x1
#define FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
#define FIRMWARE_FLAGS__RESERVED_MASK 0xfffffe
#define FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
#define FIRMWARE_FLAGS__TEST_COUNT_MASK 0xff000000
#define FIRMWARE_FLAGS__TEST_COUNT__SHIFT 0x18
#define TDC_STATUS__VDD_Boost_MASK 0xff
#define TDC_STATUS__VDD_Boost__SHIFT 0x0
#define TDC_STATUS__VDD_Throttle_MASK 0xff00
#define TDC_STATUS__VDD_Throttle__SHIFT 0x8
#define TDC_STATUS__VDDC_Boost_MASK 0xff0000
#define TDC_STATUS__VDDC_Boost__SHIFT 0x10
#define TDC_STATUS__VDDC_Throttle_MASK 0xff000000
#define TDC_STATUS__VDDC_Throttle__SHIFT 0x18
#define TDC_MV_AVERAGE__IDD_MASK 0xffff
#define TDC_MV_AVERAGE__IDD__SHIFT 0x0
#define TDC_MV_AVERAGE__IDDC_MASK 0xffff0000
#define TDC_MV_AVERAGE__IDDC__SHIFT 0x10
#define TDC_VRM_LIMIT__IDD_MASK 0xffff
#define TDC_VRM_LIMIT__IDD__SHIFT 0x0
#define TDC_VRM_LIMIT__IDDC_MASK 0xffff0000
#define TDC_VRM_LIMIT__IDDC__SHIFT 0x10
#define FEATURE_STATUS__SCLK_DPM_ON_MASK 0x1
#define FEATURE_STATUS__SCLK_DPM_ON__SHIFT 0x0
#define FEATURE_STATUS__MCLK_DPM_ON_MASK 0x2
#define FEATURE_STATUS__MCLK_DPM_ON__SHIFT 0x1
#define FEATURE_STATUS__LCLK_DPM_ON_MASK 0x4
#define FEATURE_STATUS__LCLK_DPM_ON__SHIFT 0x2
#define FEATURE_STATUS__UVD_DPM_ON_MASK 0x8
#define FEATURE_STATUS__UVD_DPM_ON__SHIFT 0x3
#define FEATURE_STATUS__VCE_DPM_ON_MASK 0x10
#define FEATURE_STATUS__VCE_DPM_ON__SHIFT 0x4
#define FEATURE_STATUS__SAMU_DPM_ON_MASK 0x20
#define FEATURE_STATUS__SAMU_DPM_ON__SHIFT 0x5
#define FEATURE_STATUS__ACP_DPM_ON_MASK 0x40
#define FEATURE_STATUS__ACP_DPM_ON__SHIFT 0x6
#define FEATURE_STATUS__PCIE_DPM_ON_MASK 0x80
#define FEATURE_STATUS__PCIE_DPM_ON__SHIFT 0x7
#define FEATURE_STATUS__BAPM_ON_MASK 0x100
#define FEATURE_STATUS__BAPM_ON__SHIFT 0x8
#define FEATURE_STATUS__LPMX_ON_MASK 0x200
#define FEATURE_STATUS__LPMX_ON__SHIFT 0x9
#define FEATURE_STATUS__NBDPM_ON_MASK 0x400
#define FEATURE_STATUS__NBDPM_ON__SHIFT 0xa
#define FEATURE_STATUS__LHTC_ON_MASK 0x800
#define FEATURE_STATUS__LHTC_ON__SHIFT 0xb
#define FEATURE_STATUS__VPC_ON_MASK 0x1000
#define FEATURE_STATUS__VPC_ON__SHIFT 0xc
#define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON_MASK 0x2000
#define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON__SHIFT 0xd
#define FEATURE_STATUS__TDC_LIMIT_ON_MASK 0x4000
#define FEATURE_STATUS__TDC_LIMIT_ON__SHIFT 0xe
#define FEATURE_STATUS__GPU_CAC_ON_MASK 0x8000
#define FEATURE_STATUS__GPU_CAC_ON__SHIFT 0xf
#define FEATURE_STATUS__AVS_ON_MASK 0x10000
#define FEATURE_STATUS__AVS_ON__SHIFT 0x10
#define FEATURE_STATUS__SPMI_ON_MASK 0x20000
#define FEATURE_STATUS__SPMI_ON__SHIFT 0x11
#define FEATURE_STATUS__SCLK_DPM_FORCED_MASK 0x40000
#define FEATURE_STATUS__SCLK_DPM_FORCED__SHIFT 0x12
#define FEATURE_STATUS__MCLK_DPM_FORCED_MASK 0x80000
#define FEATURE_STATUS__MCLK_DPM_FORCED__SHIFT 0x13
#define FEATURE_STATUS__LCLK_DPM_FORCED_MASK 0x100000
#define FEATURE_STATUS__LCLK_DPM_FORCED__SHIFT 0x14
#define FEATURE_STATUS__PCIE_DPM_FORCED_MASK 0x200000
#define FEATURE_STATUS__PCIE_DPM_FORCED__SHIFT 0x15
#define FEATURE_STATUS__RESERVED_MASK 0xffc00000
#define FEATURE_STATUS__RESERVED__SHIFT 0x16
#define ENTITY_TEMPERATURES_1__GPU_MASK 0xffffffff
#define ENTITY_TEMPERATURES_1__GPU__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_1__entries_0_0_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_1__entries_0_0_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_2__entries_0_0_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_2__entries_0_0_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_1__SHIFT 0x8
#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_0_MASK 0xff0000
#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_0__SHIFT 0x10
#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_McArbBurstTime_MASK 0xff000000
#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_McArbBurstTime__SHIFT 0x18
#define MCARB_DRAM_TIMING_TABLE_4__entries_0_1_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_4__entries_0_1_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_5__entries_0_1_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_5__entries_0_1_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_1__SHIFT 0x8
#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_0_MASK 0xff0000
#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_0__SHIFT 0x10
#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_McArbBurstTime_MASK 0xff000000
#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_McArbBurstTime__SHIFT 0x18
#define MCARB_DRAM_TIMING_TABLE_7__entries_0_2_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_7__entries_0_2_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_8__entries_0_2_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_8__entries_0_2_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_1__SHIFT 0x8
#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_0_MASK 0xff0000
#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_0__SHIFT 0x10
#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_McArbBurstTime_MASK 0xff000000
#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_McArbBurstTime__SHIFT 0x18
#define MCARB_DRAM_TIMING_TABLE_10__entries_0_3_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_10__entries_0_3_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_11__entries_0_3_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_11__entries_0_3_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_1__SHIFT 0x8
#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_0_MASK 0xff0000
#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_0__SHIFT 0x10
#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_McArbBurstTime_MASK 0xff000000
#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_McArbBurstTime__SHIFT 0x18
#define MCARB_DRAM_TIMING_TABLE_13__entries_1_0_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_13__entries_1_0_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_14__entries_1_0_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_14__entries_1_0_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_1__SHIFT 0x8
#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_0_MASK 0xff0000
#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_0__SHIFT 0x10
#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_McArbBurstTime_MASK 0xff000000
#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_McArbBurstTime__SHIFT 0x18
#define MCARB_DRAM_TIMING_TABLE_16__entries_1_1_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_16__entries_1_1_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_17__entries_1_1_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_17__entries_1_1_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_1__SHIFT 0x8
#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_0_MASK 0xff0000
#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_0__SHIFT 0x10
#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_McArbBurstTime_MASK 0xff000000
#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_McArbBurstTime__SHIFT 0x18
#define MCARB_DRAM_TIMING_TABLE_19__entries_1_2_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_19__entries_1_2_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_20__entries_1_2_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_20__entries_1_2_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_1__SHIFT 0x8
#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_0_MASK 0xff0000
#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_0__SHIFT 0x10
#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_McArbBurstTime_MASK 0xff000000
#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_McArbBurstTime__SHIFT 0x18
#define MCARB_DRAM_TIMING_TABLE_22__entries_1_3_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_22__entries_1_3_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_23__entries_1_3_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_23__entries_1_3_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_1__SHIFT 0x8
#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_0_MASK 0xff0000
#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_0__SHIFT 0x10
#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_McArbBurstTime_MASK 0xff000000
#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_McArbBurstTime__SHIFT 0x18
#define MCARB_DRAM_TIMING_TABLE_25__entries_2_0_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_25__entries_2_0_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_26__entries_2_0_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_26__entries_2_0_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_1__SHIFT 0x8
#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_0_MASK 0xff0000
#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_0__SHIFT 0x10
#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_McArbBurstTime_MASK 0xff000000
#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_McArbBurstTime__SHIFT 0x18
#define MCARB_DRAM_TIMING_TABLE_28__entries_2_1_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_28__entries_2_1_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_29__entries_2_1_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_29__entries_2_1_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_1__SHIFT 0x8
#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_0_MASK 0xff0000
#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_0__SHIFT 0x10
#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_McArbBurstTime_MASK 0xff000000
#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_McArbBurstTime__SHIFT 0x18
#define MCARB_DRAM_TIMING_TABLE_31__entries_2_2_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_31__entries_2_2_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_32__entries_2_2_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_32__entries_2_2_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_1__SHIFT 0x8
#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_0_MASK 0xff0000
#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_0__SHIFT 0x10
#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_McArbBurstTime_MASK 0xff000000
#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_McArbBurstTime__SHIFT 0x18
#define MCARB_DRAM_TIMING_TABLE_34__entries_2_3_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_34__entries_2_3_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_35__entries_2_3_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_35__entries_2_3_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_1__SHIFT 0x8
#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_0_MASK 0xff0000
#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_0__SHIFT 0x10
#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_McArbBurstTime_MASK 0xff000000
#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_McArbBurstTime__SHIFT 0x18
#define MCARB_DRAM_TIMING_TABLE_37__entries_3_0_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_37__entries_3_0_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_38__entries_3_0_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_38__entries_3_0_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_1__SHIFT 0x8
#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_0_MASK 0xff0000
#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_0__SHIFT 0x10
#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_McArbBurstTime_MASK 0xff000000
#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_McArbBurstTime__SHIFT 0x18
#define MCARB_DRAM_TIMING_TABLE_40__entries_3_1_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_40__entries_3_1_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_41__entries_3_1_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_41__entries_3_1_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_1__SHIFT 0x8
#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_0_MASK 0xff0000
#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_0__SHIFT 0x10
#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_McArbBurstTime_MASK 0xff000000
#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_McArbBurstTime__SHIFT 0x18
#define MCARB_DRAM_TIMING_TABLE_43__entries_3_2_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_43__entries_3_2_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_44__entries_3_2_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_44__entries_3_2_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_1__SHIFT 0x8
#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_0_MASK 0xff0000
#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_0__SHIFT 0x10
#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_McArbBurstTime_MASK 0xff000000
#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_McArbBurstTime__SHIFT 0x18
#define MCARB_DRAM_TIMING_TABLE_46__entries_3_3_McArbDramTiming_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_46__entries_3_3_McArbDramTiming__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_47__entries_3_3_McArbDramTiming2_MASK 0xffffffff
#define MCARB_DRAM_TIMING_TABLE_47__entries_3_3_McArbDramTiming2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_2_MASK 0xff
#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_2__SHIFT 0x0
#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_1_MASK 0xff00
#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_1__SHIFT 0x8
1169#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_0_MASK 0xff0000
1170#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_0__SHIFT 0x10
1171#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_McArbBurstTime_MASK 0xff000000
1172#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_McArbBurstTime__SHIFT 0x18
1173#define MCARB_DRAM_TIMING_TABLE_49__entries_4_0_McArbDramTiming_MASK 0xffffffff
1174#define MCARB_DRAM_TIMING_TABLE_49__entries_4_0_McArbDramTiming__SHIFT 0x0
1175#define MCARB_DRAM_TIMING_TABLE_50__entries_4_0_McArbDramTiming2_MASK 0xffffffff
1176#define MCARB_DRAM_TIMING_TABLE_50__entries_4_0_McArbDramTiming2__SHIFT 0x0
1177#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_2_MASK 0xff
1178#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_2__SHIFT 0x0
1179#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_1_MASK 0xff00
1180#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_1__SHIFT 0x8
1181#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_0_MASK 0xff0000
1182#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_0__SHIFT 0x10
1183#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_McArbBurstTime_MASK 0xff000000
1184#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_McArbBurstTime__SHIFT 0x18
1185#define MCARB_DRAM_TIMING_TABLE_52__entries_4_1_McArbDramTiming_MASK 0xffffffff
1186#define MCARB_DRAM_TIMING_TABLE_52__entries_4_1_McArbDramTiming__SHIFT 0x0
1187#define MCARB_DRAM_TIMING_TABLE_53__entries_4_1_McArbDramTiming2_MASK 0xffffffff
1188#define MCARB_DRAM_TIMING_TABLE_53__entries_4_1_McArbDramTiming2__SHIFT 0x0
1189#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_2_MASK 0xff
1190#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_2__SHIFT 0x0
1191#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_1_MASK 0xff00
1192#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_1__SHIFT 0x8
1193#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_0_MASK 0xff0000
1194#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_0__SHIFT 0x10
1195#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_McArbBurstTime_MASK 0xff000000
1196#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_McArbBurstTime__SHIFT 0x18
1197#define MCARB_DRAM_TIMING_TABLE_55__entries_4_2_McArbDramTiming_MASK 0xffffffff
1198#define MCARB_DRAM_TIMING_TABLE_55__entries_4_2_McArbDramTiming__SHIFT 0x0
1199#define MCARB_DRAM_TIMING_TABLE_56__entries_4_2_McArbDramTiming2_MASK 0xffffffff
1200#define MCARB_DRAM_TIMING_TABLE_56__entries_4_2_McArbDramTiming2__SHIFT 0x0
1201#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_2_MASK 0xff
1202#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_2__SHIFT 0x0
1203#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_1_MASK 0xff00
1204#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_1__SHIFT 0x8
1205#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_0_MASK 0xff0000
1206#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_0__SHIFT 0x10
1207#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_McArbBurstTime_MASK 0xff000000
1208#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_McArbBurstTime__SHIFT 0x18
1209#define MCARB_DRAM_TIMING_TABLE_58__entries_4_3_McArbDramTiming_MASK 0xffffffff
1210#define MCARB_DRAM_TIMING_TABLE_58__entries_4_3_McArbDramTiming__SHIFT 0x0
1211#define MCARB_DRAM_TIMING_TABLE_59__entries_4_3_McArbDramTiming2_MASK 0xffffffff
1212#define MCARB_DRAM_TIMING_TABLE_59__entries_4_3_McArbDramTiming2__SHIFT 0x0
1213#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_2_MASK 0xff
1214#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_2__SHIFT 0x0
1215#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_1_MASK 0xff00
1216#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_1__SHIFT 0x8
1217#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_0_MASK 0xff0000
1218#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_0__SHIFT 0x10
1219#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_McArbBurstTime_MASK 0xff000000
1220#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_McArbBurstTime__SHIFT 0x18
1221#define MCARB_DRAM_TIMING_TABLE_61__entries_5_0_McArbDramTiming_MASK 0xffffffff
1222#define MCARB_DRAM_TIMING_TABLE_61__entries_5_0_McArbDramTiming__SHIFT 0x0
1223#define MCARB_DRAM_TIMING_TABLE_62__entries_5_0_McArbDramTiming2_MASK 0xffffffff
1224#define MCARB_DRAM_TIMING_TABLE_62__entries_5_0_McArbDramTiming2__SHIFT 0x0
1225#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_2_MASK 0xff
1226#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_2__SHIFT 0x0
1227#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_1_MASK 0xff00
1228#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_1__SHIFT 0x8
1229#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_0_MASK 0xff0000
1230#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_0__SHIFT 0x10
1231#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_McArbBurstTime_MASK 0xff000000
1232#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_McArbBurstTime__SHIFT 0x18
1233#define MCARB_DRAM_TIMING_TABLE_64__entries_5_1_McArbDramTiming_MASK 0xffffffff
1234#define MCARB_DRAM_TIMING_TABLE_64__entries_5_1_McArbDramTiming__SHIFT 0x0
1235#define MCARB_DRAM_TIMING_TABLE_65__entries_5_1_McArbDramTiming2_MASK 0xffffffff
1236#define MCARB_DRAM_TIMING_TABLE_65__entries_5_1_McArbDramTiming2__SHIFT 0x0
1237#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_2_MASK 0xff
1238#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_2__SHIFT 0x0
1239#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_1_MASK 0xff00
1240#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_1__SHIFT 0x8
1241#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_0_MASK 0xff0000
1242#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_0__SHIFT 0x10
1243#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_McArbBurstTime_MASK 0xff000000
1244#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_McArbBurstTime__SHIFT 0x18
1245#define MCARB_DRAM_TIMING_TABLE_67__entries_5_2_McArbDramTiming_MASK 0xffffffff
1246#define MCARB_DRAM_TIMING_TABLE_67__entries_5_2_McArbDramTiming__SHIFT 0x0
1247#define MCARB_DRAM_TIMING_TABLE_68__entries_5_2_McArbDramTiming2_MASK 0xffffffff
1248#define MCARB_DRAM_TIMING_TABLE_68__entries_5_2_McArbDramTiming2__SHIFT 0x0
1249#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_2_MASK 0xff
1250#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_2__SHIFT 0x0
1251#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_1_MASK 0xff00
1252#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_1__SHIFT 0x8
1253#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_0_MASK 0xff0000
1254#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_0__SHIFT 0x10
1255#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_McArbBurstTime_MASK 0xff000000
1256#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_McArbBurstTime__SHIFT 0x18
1257#define MCARB_DRAM_TIMING_TABLE_70__entries_5_3_McArbDramTiming_MASK 0xffffffff
1258#define MCARB_DRAM_TIMING_TABLE_70__entries_5_3_McArbDramTiming__SHIFT 0x0
1259#define MCARB_DRAM_TIMING_TABLE_71__entries_5_3_McArbDramTiming2_MASK 0xffffffff
1260#define MCARB_DRAM_TIMING_TABLE_71__entries_5_3_McArbDramTiming2__SHIFT 0x0
1261#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_2_MASK 0xff
1262#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_2__SHIFT 0x0
1263#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_1_MASK 0xff00
1264#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_1__SHIFT 0x8
1265#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_0_MASK 0xff0000
1266#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_0__SHIFT 0x10
1267#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_McArbBurstTime_MASK 0xff000000
1268#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_McArbBurstTime__SHIFT 0x18
1269#define MCARB_DRAM_TIMING_TABLE_73__entries_6_0_McArbDramTiming_MASK 0xffffffff
1270#define MCARB_DRAM_TIMING_TABLE_73__entries_6_0_McArbDramTiming__SHIFT 0x0
1271#define MCARB_DRAM_TIMING_TABLE_74__entries_6_0_McArbDramTiming2_MASK 0xffffffff
1272#define MCARB_DRAM_TIMING_TABLE_74__entries_6_0_McArbDramTiming2__SHIFT 0x0
1273#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_2_MASK 0xff
1274#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_2__SHIFT 0x0
1275#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_1_MASK 0xff00
1276#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_1__SHIFT 0x8
1277#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_0_MASK 0xff0000
1278#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_0__SHIFT 0x10
1279#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_McArbBurstTime_MASK 0xff000000
1280#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_McArbBurstTime__SHIFT 0x18
1281#define MCARB_DRAM_TIMING_TABLE_76__entries_6_1_McArbDramTiming_MASK 0xffffffff
1282#define MCARB_DRAM_TIMING_TABLE_76__entries_6_1_McArbDramTiming__SHIFT 0x0
1283#define MCARB_DRAM_TIMING_TABLE_77__entries_6_1_McArbDramTiming2_MASK 0xffffffff
1284#define MCARB_DRAM_TIMING_TABLE_77__entries_6_1_McArbDramTiming2__SHIFT 0x0
1285#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_2_MASK 0xff
1286#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_2__SHIFT 0x0
1287#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_1_MASK 0xff00
1288#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_1__SHIFT 0x8
1289#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_0_MASK 0xff0000
1290#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_0__SHIFT 0x10
1291#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_McArbBurstTime_MASK 0xff000000
1292#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_McArbBurstTime__SHIFT 0x18
1293#define MCARB_DRAM_TIMING_TABLE_79__entries_6_2_McArbDramTiming_MASK 0xffffffff
1294#define MCARB_DRAM_TIMING_TABLE_79__entries_6_2_McArbDramTiming__SHIFT 0x0
1295#define MCARB_DRAM_TIMING_TABLE_80__entries_6_2_McArbDramTiming2_MASK 0xffffffff
1296#define MCARB_DRAM_TIMING_TABLE_80__entries_6_2_McArbDramTiming2__SHIFT 0x0
1297#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_2_MASK 0xff
1298#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_2__SHIFT 0x0
1299#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_1_MASK 0xff00
1300#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_1__SHIFT 0x8
1301#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_0_MASK 0xff0000
1302#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_0__SHIFT 0x10
1303#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_McArbBurstTime_MASK 0xff000000
1304#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_McArbBurstTime__SHIFT 0x18
1305#define MCARB_DRAM_TIMING_TABLE_82__entries_6_3_McArbDramTiming_MASK 0xffffffff
1306#define MCARB_DRAM_TIMING_TABLE_82__entries_6_3_McArbDramTiming__SHIFT 0x0
1307#define MCARB_DRAM_TIMING_TABLE_83__entries_6_3_McArbDramTiming2_MASK 0xffffffff
1308#define MCARB_DRAM_TIMING_TABLE_83__entries_6_3_McArbDramTiming2__SHIFT 0x0
1309#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_2_MASK 0xff
1310#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_2__SHIFT 0x0
1311#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_1_MASK 0xff00
1312#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_1__SHIFT 0x8
1313#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_0_MASK 0xff0000
1314#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_0__SHIFT 0x10
1315#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_McArbBurstTime_MASK 0xff000000
1316#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_McArbBurstTime__SHIFT 0x18
1317#define MCARB_DRAM_TIMING_TABLE_85__entries_7_0_McArbDramTiming_MASK 0xffffffff
1318#define MCARB_DRAM_TIMING_TABLE_85__entries_7_0_McArbDramTiming__SHIFT 0x0
1319#define MCARB_DRAM_TIMING_TABLE_86__entries_7_0_McArbDramTiming2_MASK 0xffffffff
1320#define MCARB_DRAM_TIMING_TABLE_86__entries_7_0_McArbDramTiming2__SHIFT 0x0
1321#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_2_MASK 0xff
1322#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_2__SHIFT 0x0
1323#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_1_MASK 0xff00
1324#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_1__SHIFT 0x8
1325#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_0_MASK 0xff0000
1326#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_0__SHIFT 0x10
1327#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_McArbBurstTime_MASK 0xff000000
1328#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_McArbBurstTime__SHIFT 0x18
1329#define MCARB_DRAM_TIMING_TABLE_88__entries_7_1_McArbDramTiming_MASK 0xffffffff
1330#define MCARB_DRAM_TIMING_TABLE_88__entries_7_1_McArbDramTiming__SHIFT 0x0
1331#define MCARB_DRAM_TIMING_TABLE_89__entries_7_1_McArbDramTiming2_MASK 0xffffffff
1332#define MCARB_DRAM_TIMING_TABLE_89__entries_7_1_McArbDramTiming2__SHIFT 0x0
1333#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_2_MASK 0xff
1334#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_2__SHIFT 0x0
1335#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_1_MASK 0xff00
1336#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_1__SHIFT 0x8
1337#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_0_MASK 0xff0000
1338#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_0__SHIFT 0x10
1339#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_McArbBurstTime_MASK 0xff000000
1340#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_McArbBurstTime__SHIFT 0x18
1341#define MCARB_DRAM_TIMING_TABLE_91__entries_7_2_McArbDramTiming_MASK 0xffffffff
1342#define MCARB_DRAM_TIMING_TABLE_91__entries_7_2_McArbDramTiming__SHIFT 0x0
1343#define MCARB_DRAM_TIMING_TABLE_92__entries_7_2_McArbDramTiming2_MASK 0xffffffff
1344#define MCARB_DRAM_TIMING_TABLE_92__entries_7_2_McArbDramTiming2__SHIFT 0x0
1345#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_2_MASK 0xff
1346#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_2__SHIFT 0x0
1347#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_1_MASK 0xff00
1348#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_1__SHIFT 0x8
1349#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_0_MASK 0xff0000
1350#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_0__SHIFT 0x10
1351#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_McArbBurstTime_MASK 0xff000000
1352#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_McArbBurstTime__SHIFT 0x18
1353#define MCARB_DRAM_TIMING_TABLE_94__entries_7_3_McArbDramTiming_MASK 0xffffffff
1354#define MCARB_DRAM_TIMING_TABLE_94__entries_7_3_McArbDramTiming__SHIFT 0x0
1355#define MCARB_DRAM_TIMING_TABLE_95__entries_7_3_McArbDramTiming2_MASK 0xffffffff
1356#define MCARB_DRAM_TIMING_TABLE_95__entries_7_3_McArbDramTiming2__SHIFT 0x0
1357#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_2_MASK 0xff
1358#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_2__SHIFT 0x0
1359#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_1_MASK 0xff00
1360#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_1__SHIFT 0x8
1361#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_0_MASK 0xff0000
1362#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_0__SHIFT 0x10
1363#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_McArbBurstTime_MASK 0xff000000
1364#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_McArbBurstTime__SHIFT 0x18
1365#define DPM_TABLE_1__GraphicsPIDController_Ki_MASK 0xffffffff
1366#define DPM_TABLE_1__GraphicsPIDController_Ki__SHIFT 0x0
1367#define DPM_TABLE_2__GraphicsPIDController_LFWindupUpperLim_MASK 0xffffffff
1368#define DPM_TABLE_2__GraphicsPIDController_LFWindupUpperLim__SHIFT 0x0
1369#define DPM_TABLE_3__GraphicsPIDController_LFWindupLowerLim_MASK 0xffffffff
1370#define DPM_TABLE_3__GraphicsPIDController_LFWindupLowerLim__SHIFT 0x0
1371#define DPM_TABLE_4__GraphicsPIDController_StatePrecision_MASK 0xffffffff
1372#define DPM_TABLE_4__GraphicsPIDController_StatePrecision__SHIFT 0x0
1373#define DPM_TABLE_5__GraphicsPIDController_LfPrecision_MASK 0xffffffff
1374#define DPM_TABLE_5__GraphicsPIDController_LfPrecision__SHIFT 0x0
1375#define DPM_TABLE_6__GraphicsPIDController_LfOffset_MASK 0xffffffff
1376#define DPM_TABLE_6__GraphicsPIDController_LfOffset__SHIFT 0x0
1377#define DPM_TABLE_7__GraphicsPIDController_MaxState_MASK 0xffffffff
1378#define DPM_TABLE_7__GraphicsPIDController_MaxState__SHIFT 0x0
1379#define DPM_TABLE_8__GraphicsPIDController_MaxLfFraction_MASK 0xffffffff
1380#define DPM_TABLE_8__GraphicsPIDController_MaxLfFraction__SHIFT 0x0
1381#define DPM_TABLE_9__GraphicsPIDController_StateShift_MASK 0xffffffff
1382#define DPM_TABLE_9__GraphicsPIDController_StateShift__SHIFT 0x0
1383#define DPM_TABLE_10__MemoryPIDController_Ki_MASK 0xffffffff
1384#define DPM_TABLE_10__MemoryPIDController_Ki__SHIFT 0x0
1385#define DPM_TABLE_11__MemoryPIDController_LFWindupUpperLim_MASK 0xffffffff
1386#define DPM_TABLE_11__MemoryPIDController_LFWindupUpperLim__SHIFT 0x0
1387#define DPM_TABLE_12__MemoryPIDController_LFWindupLowerLim_MASK 0xffffffff
1388#define DPM_TABLE_12__MemoryPIDController_LFWindupLowerLim__SHIFT 0x0
1389#define DPM_TABLE_13__MemoryPIDController_StatePrecision_MASK 0xffffffff
1390#define DPM_TABLE_13__MemoryPIDController_StatePrecision__SHIFT 0x0
1391#define DPM_TABLE_14__MemoryPIDController_LfPrecision_MASK 0xffffffff
1392#define DPM_TABLE_14__MemoryPIDController_LfPrecision__SHIFT 0x0
1393#define DPM_TABLE_15__MemoryPIDController_LfOffset_MASK 0xffffffff
1394#define DPM_TABLE_15__MemoryPIDController_LfOffset__SHIFT 0x0
1395#define DPM_TABLE_16__MemoryPIDController_MaxState_MASK 0xffffffff
1396#define DPM_TABLE_16__MemoryPIDController_MaxState__SHIFT 0x0
1397#define DPM_TABLE_17__MemoryPIDController_MaxLfFraction_MASK 0xffffffff
1398#define DPM_TABLE_17__MemoryPIDController_MaxLfFraction__SHIFT 0x0
1399#define DPM_TABLE_18__MemoryPIDController_StateShift_MASK 0xffffffff
1400#define DPM_TABLE_18__MemoryPIDController_StateShift__SHIFT 0x0
1401#define DPM_TABLE_19__LinkPIDController_Ki_MASK 0xffffffff
1402#define DPM_TABLE_19__LinkPIDController_Ki__SHIFT 0x0
1403#define DPM_TABLE_20__LinkPIDController_LFWindupUpperLim_MASK 0xffffffff
1404#define DPM_TABLE_20__LinkPIDController_LFWindupUpperLim__SHIFT 0x0
1405#define DPM_TABLE_21__LinkPIDController_LFWindupLowerLim_MASK 0xffffffff
1406#define DPM_TABLE_21__LinkPIDController_LFWindupLowerLim__SHIFT 0x0
1407#define DPM_TABLE_22__LinkPIDController_StatePrecision_MASK 0xffffffff
1408#define DPM_TABLE_22__LinkPIDController_StatePrecision__SHIFT 0x0
1409#define DPM_TABLE_23__LinkPIDController_LfPrecision_MASK 0xffffffff
1410#define DPM_TABLE_23__LinkPIDController_LfPrecision__SHIFT 0x0
1411#define DPM_TABLE_24__LinkPIDController_LfOffset_MASK 0xffffffff
1412#define DPM_TABLE_24__LinkPIDController_LfOffset__SHIFT 0x0
1413#define DPM_TABLE_25__LinkPIDController_MaxState_MASK 0xffffffff
1414#define DPM_TABLE_25__LinkPIDController_MaxState__SHIFT 0x0
1415#define DPM_TABLE_26__LinkPIDController_MaxLfFraction_MASK 0xffffffff
1416#define DPM_TABLE_26__LinkPIDController_MaxLfFraction__SHIFT 0x0
1417#define DPM_TABLE_27__LinkPIDController_StateShift_MASK 0xffffffff
1418#define DPM_TABLE_27__LinkPIDController_StateShift__SHIFT 0x0
1419#define DPM_TABLE_28__SystemFlags_MASK 0xffffffff
1420#define DPM_TABLE_28__SystemFlags__SHIFT 0x0
1421#define DPM_TABLE_29__VRConfig_MASK 0xffffffff
1422#define DPM_TABLE_29__VRConfig__SHIFT 0x0
1423#define DPM_TABLE_30__SmioMask1_MASK 0xffffffff
1424#define DPM_TABLE_30__SmioMask1__SHIFT 0x0
1425#define DPM_TABLE_31__SmioMask2_MASK 0xffffffff
1426#define DPM_TABLE_31__SmioMask2__SHIFT 0x0
1427#define DPM_TABLE_32__SmioTable1_Pattern_0_padding_MASK 0xff
1428#define DPM_TABLE_32__SmioTable1_Pattern_0_padding__SHIFT 0x0
1429#define DPM_TABLE_32__SmioTable1_Pattern_0_Smio_MASK 0xff00
1430#define DPM_TABLE_32__SmioTable1_Pattern_0_Smio__SHIFT 0x8
1431#define DPM_TABLE_32__SmioTable1_Pattern_0_Voltage_MASK 0xffff0000
1432#define DPM_TABLE_32__SmioTable1_Pattern_0_Voltage__SHIFT 0x10
1433#define DPM_TABLE_33__SmioTable1_Pattern_1_padding_MASK 0xff
1434#define DPM_TABLE_33__SmioTable1_Pattern_1_padding__SHIFT 0x0
1435#define DPM_TABLE_33__SmioTable1_Pattern_1_Smio_MASK 0xff00
1436#define DPM_TABLE_33__SmioTable1_Pattern_1_Smio__SHIFT 0x8
1437#define DPM_TABLE_33__SmioTable1_Pattern_1_Voltage_MASK 0xffff0000
1438#define DPM_TABLE_33__SmioTable1_Pattern_1_Voltage__SHIFT 0x10
1439#define DPM_TABLE_34__SmioTable1_Pattern_2_padding_MASK 0xff
1440#define DPM_TABLE_34__SmioTable1_Pattern_2_padding__SHIFT 0x0
1441#define DPM_TABLE_34__SmioTable1_Pattern_2_Smio_MASK 0xff00
1442#define DPM_TABLE_34__SmioTable1_Pattern_2_Smio__SHIFT 0x8
1443#define DPM_TABLE_34__SmioTable1_Pattern_2_Voltage_MASK 0xffff0000
1444#define DPM_TABLE_34__SmioTable1_Pattern_2_Voltage__SHIFT 0x10
1445#define DPM_TABLE_35__SmioTable1_Pattern_3_padding_MASK 0xff
1446#define DPM_TABLE_35__SmioTable1_Pattern_3_padding__SHIFT 0x0
1447#define DPM_TABLE_35__SmioTable1_Pattern_3_Smio_MASK 0xff00
1448#define DPM_TABLE_35__SmioTable1_Pattern_3_Smio__SHIFT 0x8
1449#define DPM_TABLE_35__SmioTable1_Pattern_3_Voltage_MASK 0xffff0000
1450#define DPM_TABLE_35__SmioTable1_Pattern_3_Voltage__SHIFT 0x10
1451#define DPM_TABLE_36__SmioTable2_Pattern_0_padding_MASK 0xff
1452#define DPM_TABLE_36__SmioTable2_Pattern_0_padding__SHIFT 0x0
1453#define DPM_TABLE_36__SmioTable2_Pattern_0_Smio_MASK 0xff00
1454#define DPM_TABLE_36__SmioTable2_Pattern_0_Smio__SHIFT 0x8
1455#define DPM_TABLE_36__SmioTable2_Pattern_0_Voltage_MASK 0xffff0000
1456#define DPM_TABLE_36__SmioTable2_Pattern_0_Voltage__SHIFT 0x10
1457#define DPM_TABLE_37__SmioTable2_Pattern_1_padding_MASK 0xff
1458#define DPM_TABLE_37__SmioTable2_Pattern_1_padding__SHIFT 0x0
1459#define DPM_TABLE_37__SmioTable2_Pattern_1_Smio_MASK 0xff00
1460#define DPM_TABLE_37__SmioTable2_Pattern_1_Smio__SHIFT 0x8
1461#define DPM_TABLE_37__SmioTable2_Pattern_1_Voltage_MASK 0xffff0000
1462#define DPM_TABLE_37__SmioTable2_Pattern_1_Voltage__SHIFT 0x10
1463#define DPM_TABLE_38__SmioTable2_Pattern_2_padding_MASK 0xff
1464#define DPM_TABLE_38__SmioTable2_Pattern_2_padding__SHIFT 0x0
1465#define DPM_TABLE_38__SmioTable2_Pattern_2_Smio_MASK 0xff00
1466#define DPM_TABLE_38__SmioTable2_Pattern_2_Smio__SHIFT 0x8
1467#define DPM_TABLE_38__SmioTable2_Pattern_2_Voltage_MASK 0xffff0000
1468#define DPM_TABLE_38__SmioTable2_Pattern_2_Voltage__SHIFT 0x10
1469#define DPM_TABLE_39__SmioTable2_Pattern_3_padding_MASK 0xff
1470#define DPM_TABLE_39__SmioTable2_Pattern_3_padding__SHIFT 0x0
1471#define DPM_TABLE_39__SmioTable2_Pattern_3_Smio_MASK 0xff00
1472#define DPM_TABLE_39__SmioTable2_Pattern_3_Smio__SHIFT 0x8
1473#define DPM_TABLE_39__SmioTable2_Pattern_3_Voltage_MASK 0xffff0000
1474#define DPM_TABLE_39__SmioTable2_Pattern_3_Voltage__SHIFT 0x10
1475#define DPM_TABLE_40__VddcLevelCount_MASK 0xffffffff
1476#define DPM_TABLE_40__VddcLevelCount__SHIFT 0x0
1477#define DPM_TABLE_41__VddciLevelCount_MASK 0xffffffff
1478#define DPM_TABLE_41__VddciLevelCount__SHIFT 0x0
1479#define DPM_TABLE_42__VddGfxLevelCount_MASK 0xffffffff
1480#define DPM_TABLE_42__VddGfxLevelCount__SHIFT 0x0
1481#define DPM_TABLE_43__MvddLevelCount_MASK 0xffffffff
1482#define DPM_TABLE_43__MvddLevelCount__SHIFT 0x0
1483#define DPM_TABLE_44__VddcTable_1_MASK 0xffff
1484#define DPM_TABLE_44__VddcTable_1__SHIFT 0x0
1485#define DPM_TABLE_44__VddcTable_0_MASK 0xffff0000
1486#define DPM_TABLE_44__VddcTable_0__SHIFT 0x10
1487#define DPM_TABLE_45__VddcTable_3_MASK 0xffff
1488#define DPM_TABLE_45__VddcTable_3__SHIFT 0x0
1489#define DPM_TABLE_45__VddcTable_2_MASK 0xffff0000
1490#define DPM_TABLE_45__VddcTable_2__SHIFT 0x10
1491#define DPM_TABLE_46__VddcTable_5_MASK 0xffff
1492#define DPM_TABLE_46__VddcTable_5__SHIFT 0x0
1493#define DPM_TABLE_46__VddcTable_4_MASK 0xffff0000
1494#define DPM_TABLE_46__VddcTable_4__SHIFT 0x10
1495#define DPM_TABLE_47__VddcTable_7_MASK 0xffff
1496#define DPM_TABLE_47__VddcTable_7__SHIFT 0x0
1497#define DPM_TABLE_47__VddcTable_6_MASK 0xffff0000
1498#define DPM_TABLE_47__VddcTable_6__SHIFT 0x10
1499#define DPM_TABLE_48__VddcTable_9_MASK 0xffff
1500#define DPM_TABLE_48__VddcTable_9__SHIFT 0x0
1501#define DPM_TABLE_48__VddcTable_8_MASK 0xffff0000
1502#define DPM_TABLE_48__VddcTable_8__SHIFT 0x10
1503#define DPM_TABLE_49__VddcTable_11_MASK 0xffff
1504#define DPM_TABLE_49__VddcTable_11__SHIFT 0x0
1505#define DPM_TABLE_49__VddcTable_10_MASK 0xffff0000
1506#define DPM_TABLE_49__VddcTable_10__SHIFT 0x10
1507#define DPM_TABLE_50__VddcTable_13_MASK 0xffff
1508#define DPM_TABLE_50__VddcTable_13__SHIFT 0x0
1509#define DPM_TABLE_50__VddcTable_12_MASK 0xffff0000
1510#define DPM_TABLE_50__VddcTable_12__SHIFT 0x10
1511#define DPM_TABLE_51__VddcTable_15_MASK 0xffff
1512#define DPM_TABLE_51__VddcTable_15__SHIFT 0x0
1513#define DPM_TABLE_51__VddcTable_14_MASK 0xffff0000
1514#define DPM_TABLE_51__VddcTable_14__SHIFT 0x10
1515#define DPM_TABLE_52__VddGfxTable_1_MASK 0xffff
1516#define DPM_TABLE_52__VddGfxTable_1__SHIFT 0x0
1517#define DPM_TABLE_52__VddGfxTable_0_MASK 0xffff0000
1518#define DPM_TABLE_52__VddGfxTable_0__SHIFT 0x10
1519#define DPM_TABLE_53__VddGfxTable_3_MASK 0xffff
1520#define DPM_TABLE_53__VddGfxTable_3__SHIFT 0x0
1521#define DPM_TABLE_53__VddGfxTable_2_MASK 0xffff0000
1522#define DPM_TABLE_53__VddGfxTable_2__SHIFT 0x10
1523#define DPM_TABLE_54__VddGfxTable_5_MASK 0xffff
1524#define DPM_TABLE_54__VddGfxTable_5__SHIFT 0x0
1525#define DPM_TABLE_54__VddGfxTable_4_MASK 0xffff0000
1526#define DPM_TABLE_54__VddGfxTable_4__SHIFT 0x10
1527#define DPM_TABLE_55__VddGfxTable_7_MASK 0xffff
1528#define DPM_TABLE_55__VddGfxTable_7__SHIFT 0x0
1529#define DPM_TABLE_55__VddGfxTable_6_MASK 0xffff0000
1530#define DPM_TABLE_55__VddGfxTable_6__SHIFT 0x10
1531#define DPM_TABLE_56__VddGfxTable_9_MASK 0xffff
1532#define DPM_TABLE_56__VddGfxTable_9__SHIFT 0x0
1533#define DPM_TABLE_56__VddGfxTable_8_MASK 0xffff0000
1534#define DPM_TABLE_56__VddGfxTable_8__SHIFT 0x10
1535#define DPM_TABLE_57__VddGfxTable_11_MASK 0xffff
1536#define DPM_TABLE_57__VddGfxTable_11__SHIFT 0x0
1537#define DPM_TABLE_57__VddGfxTable_10_MASK 0xffff0000
1538#define DPM_TABLE_57__VddGfxTable_10__SHIFT 0x10
1539#define DPM_TABLE_58__VddGfxTable_13_MASK 0xffff
1540#define DPM_TABLE_58__VddGfxTable_13__SHIFT 0x0
1541#define DPM_TABLE_58__VddGfxTable_12_MASK 0xffff0000
1542#define DPM_TABLE_58__VddGfxTable_12__SHIFT 0x10
1543#define DPM_TABLE_59__VddGfxTable_15_MASK 0xffff
1544#define DPM_TABLE_59__VddGfxTable_15__SHIFT 0x0
1545#define DPM_TABLE_59__VddGfxTable_14_MASK 0xffff0000
1546#define DPM_TABLE_59__VddGfxTable_14__SHIFT 0x10
1547#define DPM_TABLE_60__VddciTable_1_MASK 0xffff
1548#define DPM_TABLE_60__VddciTable_1__SHIFT 0x0
1549#define DPM_TABLE_60__VddciTable_0_MASK 0xffff0000
1550#define DPM_TABLE_60__VddciTable_0__SHIFT 0x10
1551#define DPM_TABLE_61__VddciTable_3_MASK 0xffff
1552#define DPM_TABLE_61__VddciTable_3__SHIFT 0x0
1553#define DPM_TABLE_61__VddciTable_2_MASK 0xffff0000
1554#define DPM_TABLE_61__VddciTable_2__SHIFT 0x10
1555#define DPM_TABLE_62__VddciTable_5_MASK 0xffff
1556#define DPM_TABLE_62__VddciTable_5__SHIFT 0x0
1557#define DPM_TABLE_62__VddciTable_4_MASK 0xffff0000
1558#define DPM_TABLE_62__VddciTable_4__SHIFT 0x10
1559#define DPM_TABLE_63__VddciTable_7_MASK 0xffff
1560#define DPM_TABLE_63__VddciTable_7__SHIFT 0x0
1561#define DPM_TABLE_63__VddciTable_6_MASK 0xffff0000
1562#define DPM_TABLE_63__VddciTable_6__SHIFT 0x10
1563#define DPM_TABLE_64__BapmVddGfxVidHiSidd_3_MASK 0xff
1564#define DPM_TABLE_64__BapmVddGfxVidHiSidd_3__SHIFT 0x0
1565#define DPM_TABLE_64__BapmVddGfxVidHiSidd_2_MASK 0xff00
1566#define DPM_TABLE_64__BapmVddGfxVidHiSidd_2__SHIFT 0x8
1567#define DPM_TABLE_64__BapmVddGfxVidHiSidd_1_MASK 0xff0000
1568#define DPM_TABLE_64__BapmVddGfxVidHiSidd_1__SHIFT 0x10
1569#define DPM_TABLE_64__BapmVddGfxVidHiSidd_0_MASK 0xff000000
1570#define DPM_TABLE_64__BapmVddGfxVidHiSidd_0__SHIFT 0x18
1571#define DPM_TABLE_65__BapmVddGfxVidHiSidd_7_MASK 0xff
1572#define DPM_TABLE_65__BapmVddGfxVidHiSidd_7__SHIFT 0x0
1573#define DPM_TABLE_65__BapmVddGfxVidHiSidd_6_MASK 0xff00
1574#define DPM_TABLE_65__BapmVddGfxVidHiSidd_6__SHIFT 0x8
1575#define DPM_TABLE_65__BapmVddGfxVidHiSidd_5_MASK 0xff0000
1576#define DPM_TABLE_65__BapmVddGfxVidHiSidd_5__SHIFT 0x10
1577#define DPM_TABLE_65__BapmVddGfxVidHiSidd_4_MASK 0xff000000
1578#define DPM_TABLE_65__BapmVddGfxVidHiSidd_4__SHIFT 0x18
1579#define DPM_TABLE_66__BapmVddGfxVidHiSidd_11_MASK 0xff
1580#define DPM_TABLE_66__BapmVddGfxVidHiSidd_11__SHIFT 0x0
1581#define DPM_TABLE_66__BapmVddGfxVidHiSidd_10_MASK 0xff00
1582#define DPM_TABLE_66__BapmVddGfxVidHiSidd_10__SHIFT 0x8
1583#define DPM_TABLE_66__BapmVddGfxVidHiSidd_9_MASK 0xff0000
1584#define DPM_TABLE_66__BapmVddGfxVidHiSidd_9__SHIFT 0x10
1585#define DPM_TABLE_66__BapmVddGfxVidHiSidd_8_MASK 0xff000000
1586#define DPM_TABLE_66__BapmVddGfxVidHiSidd_8__SHIFT 0x18
1587#define DPM_TABLE_67__BapmVddGfxVidHiSidd_15_MASK 0xff
1588#define DPM_TABLE_67__BapmVddGfxVidHiSidd_15__SHIFT 0x0
1589#define DPM_TABLE_67__BapmVddGfxVidHiSidd_14_MASK 0xff00
1590#define DPM_TABLE_67__BapmVddGfxVidHiSidd_14__SHIFT 0x8
1591#define DPM_TABLE_67__BapmVddGfxVidHiSidd_13_MASK 0xff0000
1592#define DPM_TABLE_67__BapmVddGfxVidHiSidd_13__SHIFT 0x10
1593#define DPM_TABLE_67__BapmVddGfxVidHiSidd_12_MASK 0xff000000
1594#define DPM_TABLE_67__BapmVddGfxVidHiSidd_12__SHIFT 0x18
1595#define DPM_TABLE_68__BapmVddGfxVidLoSidd_3_MASK 0xff
1596#define DPM_TABLE_68__BapmVddGfxVidLoSidd_3__SHIFT 0x0
1597#define DPM_TABLE_68__BapmVddGfxVidLoSidd_2_MASK 0xff00
1598#define DPM_TABLE_68__BapmVddGfxVidLoSidd_2__SHIFT 0x8
1599#define DPM_TABLE_68__BapmVddGfxVidLoSidd_1_MASK 0xff0000
1600#define DPM_TABLE_68__BapmVddGfxVidLoSidd_1__SHIFT 0x10
1601#define DPM_TABLE_68__BapmVddGfxVidLoSidd_0_MASK 0xff000000
1602#define DPM_TABLE_68__BapmVddGfxVidLoSidd_0__SHIFT 0x18
1603#define DPM_TABLE_69__BapmVddGfxVidLoSidd_7_MASK 0xff
1604#define DPM_TABLE_69__BapmVddGfxVidLoSidd_7__SHIFT 0x0
1605#define DPM_TABLE_69__BapmVddGfxVidLoSidd_6_MASK 0xff00
1606#define DPM_TABLE_69__BapmVddGfxVidLoSidd_6__SHIFT 0x8
1607#define DPM_TABLE_69__BapmVddGfxVidLoSidd_5_MASK 0xff0000
1608#define DPM_TABLE_69__BapmVddGfxVidLoSidd_5__SHIFT 0x10
1609#define DPM_TABLE_69__BapmVddGfxVidLoSidd_4_MASK 0xff000000
1610#define DPM_TABLE_69__BapmVddGfxVidLoSidd_4__SHIFT 0x18
1611#define DPM_TABLE_70__BapmVddGfxVidLoSidd_11_MASK 0xff
1612#define DPM_TABLE_70__BapmVddGfxVidLoSidd_11__SHIFT 0x0
1613#define DPM_TABLE_70__BapmVddGfxVidLoSidd_10_MASK 0xff00
1614#define DPM_TABLE_70__BapmVddGfxVidLoSidd_10__SHIFT 0x8
1615#define DPM_TABLE_70__BapmVddGfxVidLoSidd_9_MASK 0xff0000
1616#define DPM_TABLE_70__BapmVddGfxVidLoSidd_9__SHIFT 0x10
1617#define DPM_TABLE_70__BapmVddGfxVidLoSidd_8_MASK 0xff000000
1618#define DPM_TABLE_70__BapmVddGfxVidLoSidd_8__SHIFT 0x18
1619#define DPM_TABLE_71__BapmVddGfxVidLoSidd_15_MASK 0xff
1620#define DPM_TABLE_71__BapmVddGfxVidLoSidd_15__SHIFT 0x0
1621#define DPM_TABLE_71__BapmVddGfxVidLoSidd_14_MASK 0xff00
1622#define DPM_TABLE_71__BapmVddGfxVidLoSidd_14__SHIFT 0x8
1623#define DPM_TABLE_71__BapmVddGfxVidLoSidd_13_MASK 0xff0000
1624#define DPM_TABLE_71__BapmVddGfxVidLoSidd_13__SHIFT 0x10
1625#define DPM_TABLE_71__BapmVddGfxVidLoSidd_12_MASK 0xff000000
1626#define DPM_TABLE_71__BapmVddGfxVidLoSidd_12__SHIFT 0x18
1627#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_3_MASK 0xff
1628#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_3__SHIFT 0x0
1629#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_2_MASK 0xff00
1630#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_2__SHIFT 0x8
1631#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_1_MASK 0xff0000
1632#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_1__SHIFT 0x10
1633#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_0_MASK 0xff000000
1634#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_0__SHIFT 0x18
1635#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_7_MASK 0xff
1636#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_7__SHIFT 0x0
1637#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_6_MASK 0xff00
1638#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_6__SHIFT 0x8
1639#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_5_MASK 0xff0000
1640#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_5__SHIFT 0x10
1641#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_4_MASK 0xff000000
1642#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_4__SHIFT 0x18
1643#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_11_MASK 0xff
1644#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_11__SHIFT 0x0
1645#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_10_MASK 0xff00
1646#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_10__SHIFT 0x8
1647#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_9_MASK 0xff0000
1648#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_9__SHIFT 0x10
1649#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_8_MASK 0xff000000
1650#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_8__SHIFT 0x18
1651#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_15_MASK 0xff
1652#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_15__SHIFT 0x0
1653#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_14_MASK 0xff00
1654#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_14__SHIFT 0x8
1655#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_13_MASK 0xff0000
1656#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_13__SHIFT 0x10
1657#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_12_MASK 0xff000000
1658#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_12__SHIFT 0x18
1659#define DPM_TABLE_76__BapmVddcVidHiSidd_3_MASK 0xff
1660#define DPM_TABLE_76__BapmVddcVidHiSidd_3__SHIFT 0x0
1661#define DPM_TABLE_76__BapmVddcVidHiSidd_2_MASK 0xff00
1662#define DPM_TABLE_76__BapmVddcVidHiSidd_2__SHIFT 0x8
1663#define DPM_TABLE_76__BapmVddcVidHiSidd_1_MASK 0xff0000
1664#define DPM_TABLE_76__BapmVddcVidHiSidd_1__SHIFT 0x10
1665#define DPM_TABLE_76__BapmVddcVidHiSidd_0_MASK 0xff000000
1666#define DPM_TABLE_76__BapmVddcVidHiSidd_0__SHIFT 0x18
1667#define DPM_TABLE_77__BapmVddcVidHiSidd_7_MASK 0xff
1668#define DPM_TABLE_77__BapmVddcVidHiSidd_7__SHIFT 0x0
1669#define DPM_TABLE_77__BapmVddcVidHiSidd_6_MASK 0xff00
1670#define DPM_TABLE_77__BapmVddcVidHiSidd_6__SHIFT 0x8
1671#define DPM_TABLE_77__BapmVddcVidHiSidd_5_MASK 0xff0000
1672#define DPM_TABLE_77__BapmVddcVidHiSidd_5__SHIFT 0x10
1673#define DPM_TABLE_77__BapmVddcVidHiSidd_4_MASK 0xff000000
1674#define DPM_TABLE_77__BapmVddcVidHiSidd_4__SHIFT 0x18
1675#define DPM_TABLE_78__BapmVddcVidHiSidd_11_MASK 0xff
1676#define DPM_TABLE_78__BapmVddcVidHiSidd_11__SHIFT 0x0
1677#define DPM_TABLE_78__BapmVddcVidHiSidd_10_MASK 0xff00
1678#define DPM_TABLE_78__BapmVddcVidHiSidd_10__SHIFT 0x8
1679#define DPM_TABLE_78__BapmVddcVidHiSidd_9_MASK 0xff0000
1680#define DPM_TABLE_78__BapmVddcVidHiSidd_9__SHIFT 0x10
1681#define DPM_TABLE_78__BapmVddcVidHiSidd_8_MASK 0xff000000
1682#define DPM_TABLE_78__BapmVddcVidHiSidd_8__SHIFT 0x18
1683#define DPM_TABLE_79__BapmVddcVidHiSidd_15_MASK 0xff
1684#define DPM_TABLE_79__BapmVddcVidHiSidd_15__SHIFT 0x0
1685#define DPM_TABLE_79__BapmVddcVidHiSidd_14_MASK 0xff00
1686#define DPM_TABLE_79__BapmVddcVidHiSidd_14__SHIFT 0x8
1687#define DPM_TABLE_79__BapmVddcVidHiSidd_13_MASK 0xff0000
1688#define DPM_TABLE_79__BapmVddcVidHiSidd_13__SHIFT 0x10
1689#define DPM_TABLE_79__BapmVddcVidHiSidd_12_MASK 0xff000000
1690#define DPM_TABLE_79__BapmVddcVidHiSidd_12__SHIFT 0x18
1691#define DPM_TABLE_80__BapmVddcVidLoSidd_3_MASK 0xff
1692#define DPM_TABLE_80__BapmVddcVidLoSidd_3__SHIFT 0x0
1693#define DPM_TABLE_80__BapmVddcVidLoSidd_2_MASK 0xff00
1694#define DPM_TABLE_80__BapmVddcVidLoSidd_2__SHIFT 0x8
1695#define DPM_TABLE_80__BapmVddcVidLoSidd_1_MASK 0xff0000
1696#define DPM_TABLE_80__BapmVddcVidLoSidd_1__SHIFT 0x10
1697#define DPM_TABLE_80__BapmVddcVidLoSidd_0_MASK 0xff000000
1698#define DPM_TABLE_80__BapmVddcVidLoSidd_0__SHIFT 0x18
1699#define DPM_TABLE_81__BapmVddcVidLoSidd_7_MASK 0xff
1700#define DPM_TABLE_81__BapmVddcVidLoSidd_7__SHIFT 0x0
1701#define DPM_TABLE_81__BapmVddcVidLoSidd_6_MASK 0xff00
1702#define DPM_TABLE_81__BapmVddcVidLoSidd_6__SHIFT 0x8
1703#define DPM_TABLE_81__BapmVddcVidLoSidd_5_MASK 0xff0000
1704#define DPM_TABLE_81__BapmVddcVidLoSidd_5__SHIFT 0x10
1705#define DPM_TABLE_81__BapmVddcVidLoSidd_4_MASK 0xff000000
1706#define DPM_TABLE_81__BapmVddcVidLoSidd_4__SHIFT 0x18
1707#define DPM_TABLE_82__BapmVddcVidLoSidd_11_MASK 0xff
1708#define DPM_TABLE_82__BapmVddcVidLoSidd_11__SHIFT 0x0
1709#define DPM_TABLE_82__BapmVddcVidLoSidd_10_MASK 0xff00
1710#define DPM_TABLE_82__BapmVddcVidLoSidd_10__SHIFT 0x8
1711#define DPM_TABLE_82__BapmVddcVidLoSidd_9_MASK 0xff0000
1712#define DPM_TABLE_82__BapmVddcVidLoSidd_9__SHIFT 0x10
1713#define DPM_TABLE_82__BapmVddcVidLoSidd_8_MASK 0xff000000
1714#define DPM_TABLE_82__BapmVddcVidLoSidd_8__SHIFT 0x18
1715#define DPM_TABLE_83__BapmVddcVidLoSidd_15_MASK 0xff
1716#define DPM_TABLE_83__BapmVddcVidLoSidd_15__SHIFT 0x0
1717#define DPM_TABLE_83__BapmVddcVidLoSidd_14_MASK 0xff00
1718#define DPM_TABLE_83__BapmVddcVidLoSidd_14__SHIFT 0x8
1719#define DPM_TABLE_83__BapmVddcVidLoSidd_13_MASK 0xff0000
1720#define DPM_TABLE_83__BapmVddcVidLoSidd_13__SHIFT 0x10
1721#define DPM_TABLE_83__BapmVddcVidLoSidd_12_MASK 0xff000000
1722#define DPM_TABLE_83__BapmVddcVidLoSidd_12__SHIFT 0x18
1723#define DPM_TABLE_84__BapmVddcVidHiSidd2_3_MASK 0xff
1724#define DPM_TABLE_84__BapmVddcVidHiSidd2_3__SHIFT 0x0
1725#define DPM_TABLE_84__BapmVddcVidHiSidd2_2_MASK 0xff00
1726#define DPM_TABLE_84__BapmVddcVidHiSidd2_2__SHIFT 0x8
1727#define DPM_TABLE_84__BapmVddcVidHiSidd2_1_MASK 0xff0000
1728#define DPM_TABLE_84__BapmVddcVidHiSidd2_1__SHIFT 0x10
1729#define DPM_TABLE_84__BapmVddcVidHiSidd2_0_MASK 0xff000000
1730#define DPM_TABLE_84__BapmVddcVidHiSidd2_0__SHIFT 0x18
1731#define DPM_TABLE_85__BapmVddcVidHiSidd2_7_MASK 0xff
1732#define DPM_TABLE_85__BapmVddcVidHiSidd2_7__SHIFT 0x0
1733#define DPM_TABLE_85__BapmVddcVidHiSidd2_6_MASK 0xff00
1734#define DPM_TABLE_85__BapmVddcVidHiSidd2_6__SHIFT 0x8
1735#define DPM_TABLE_85__BapmVddcVidHiSidd2_5_MASK 0xff0000
1736#define DPM_TABLE_85__BapmVddcVidHiSidd2_5__SHIFT 0x10
1737#define DPM_TABLE_85__BapmVddcVidHiSidd2_4_MASK 0xff000000
1738#define DPM_TABLE_85__BapmVddcVidHiSidd2_4__SHIFT 0x18
1739#define DPM_TABLE_86__BapmVddcVidHiSidd2_11_MASK 0xff
1740#define DPM_TABLE_86__BapmVddcVidHiSidd2_11__SHIFT 0x0
1741#define DPM_TABLE_86__BapmVddcVidHiSidd2_10_MASK 0xff00
1742#define DPM_TABLE_86__BapmVddcVidHiSidd2_10__SHIFT 0x8
1743#define DPM_TABLE_86__BapmVddcVidHiSidd2_9_MASK 0xff0000
1744#define DPM_TABLE_86__BapmVddcVidHiSidd2_9__SHIFT 0x10
1745#define DPM_TABLE_86__BapmVddcVidHiSidd2_8_MASK 0xff000000
1746#define DPM_TABLE_86__BapmVddcVidHiSidd2_8__SHIFT 0x18
1747#define DPM_TABLE_87__BapmVddcVidHiSidd2_15_MASK 0xff
1748#define DPM_TABLE_87__BapmVddcVidHiSidd2_15__SHIFT 0x0
1749#define DPM_TABLE_87__BapmVddcVidHiSidd2_14_MASK 0xff00
1750#define DPM_TABLE_87__BapmVddcVidHiSidd2_14__SHIFT 0x8
1751#define DPM_TABLE_87__BapmVddcVidHiSidd2_13_MASK 0xff0000
1752#define DPM_TABLE_87__BapmVddcVidHiSidd2_13__SHIFT 0x10
1753#define DPM_TABLE_87__BapmVddcVidHiSidd2_12_MASK 0xff000000
1754#define DPM_TABLE_87__BapmVddcVidHiSidd2_12__SHIFT 0x18
1755#define DPM_TABLE_88__MasterDeepSleepControl_MASK 0xff
1756#define DPM_TABLE_88__MasterDeepSleepControl__SHIFT 0x0
1757#define DPM_TABLE_88__LinkLevelCount_MASK 0xff00
1758#define DPM_TABLE_88__LinkLevelCount__SHIFT 0x8
1759#define DPM_TABLE_88__MemoryDpmLevelCount_MASK 0xff0000
1760#define DPM_TABLE_88__MemoryDpmLevelCount__SHIFT 0x10
1761#define DPM_TABLE_88__GraphicsDpmLevelCount_MASK 0xff000000
1762#define DPM_TABLE_88__GraphicsDpmLevelCount__SHIFT 0x18
1763#define DPM_TABLE_89__SamuLevelCount_MASK 0xff
1764#define DPM_TABLE_89__SamuLevelCount__SHIFT 0x0
1765#define DPM_TABLE_89__AcpLevelCount_MASK 0xff00
1766#define DPM_TABLE_89__AcpLevelCount__SHIFT 0x8
1767#define DPM_TABLE_89__VceLevelCount_MASK 0xff0000
1768#define DPM_TABLE_89__VceLevelCount__SHIFT 0x10
1769#define DPM_TABLE_89__UvdLevelCount_MASK 0xff000000
1770#define DPM_TABLE_89__UvdLevelCount__SHIFT 0x18
1771#define DPM_TABLE_90__Reserved_0_MASK 0xff
1772#define DPM_TABLE_90__Reserved_0__SHIFT 0x0
1773#define DPM_TABLE_90__ThermOutMode_MASK 0xff00
1774#define DPM_TABLE_90__ThermOutMode__SHIFT 0x8
1775#define DPM_TABLE_90__ThermOutPolarity_MASK 0xff0000
1776#define DPM_TABLE_90__ThermOutPolarity__SHIFT 0x10
1777#define DPM_TABLE_90__ThermOutGpio_MASK 0xff000000
1778#define DPM_TABLE_90__ThermOutGpio__SHIFT 0x18
1779#define DPM_TABLE_91__Reserved_0_MASK 0xffffffff
1780#define DPM_TABLE_91__Reserved_0__SHIFT 0x0
1781#define DPM_TABLE_92__Reserved_1_MASK 0xffffffff
1782#define DPM_TABLE_92__Reserved_1__SHIFT 0x0
1783#define DPM_TABLE_93__Reserved_2_MASK 0xffffffff
1784#define DPM_TABLE_93__Reserved_2__SHIFT 0x0
1785#define DPM_TABLE_94__Reserved_3_MASK 0xffffffff
1786#define DPM_TABLE_94__Reserved_3__SHIFT 0x0
1787#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Phases_MASK 0xff
1788#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Phases__SHIFT 0x0
1789#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_VddGfx_MASK 0xff00
1790#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_VddGfx__SHIFT 0x8
1791#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddci_MASK 0xff0000
1792#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddci__SHIFT 0x10
1793#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddc_MASK 0xff000000
1794#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddc__SHIFT 0x18
1795#define DPM_TABLE_96__GraphicsLevel_0_SclkFrequency_MASK 0xffffffff
1796#define DPM_TABLE_96__GraphicsLevel_0_SclkFrequency__SHIFT 0x0
1797#define DPM_TABLE_97__GraphicsLevel_0_ActivityLevel_MASK 0xffff
1798#define DPM_TABLE_97__GraphicsLevel_0_ActivityLevel__SHIFT 0x0
1799#define DPM_TABLE_97__GraphicsLevel_0_DeepSleepDivId_MASK 0xff0000
1800#define DPM_TABLE_97__GraphicsLevel_0_DeepSleepDivId__SHIFT 0x10
1801#define DPM_TABLE_97__GraphicsLevel_0_pcieDpmLevel_MASK 0xff000000
1802#define DPM_TABLE_97__GraphicsLevel_0_pcieDpmLevel__SHIFT 0x18
1803#define DPM_TABLE_98__GraphicsLevel_0_CgSpllFuncCntl3_MASK 0xffffffff
1804#define DPM_TABLE_98__GraphicsLevel_0_CgSpllFuncCntl3__SHIFT 0x0
1805#define DPM_TABLE_99__GraphicsLevel_0_CgSpllFuncCntl4_MASK 0xffffffff
1806#define DPM_TABLE_99__GraphicsLevel_0_CgSpllFuncCntl4__SHIFT 0x0
1807#define DPM_TABLE_100__GraphicsLevel_0_SpllSpreadSpectrum_MASK 0xffffffff
1808#define DPM_TABLE_100__GraphicsLevel_0_SpllSpreadSpectrum__SHIFT 0x0
1809#define DPM_TABLE_101__GraphicsLevel_0_SpllSpreadSpectrum2_MASK 0xffffffff
1810#define DPM_TABLE_101__GraphicsLevel_0_SpllSpreadSpectrum2__SHIFT 0x0
1811#define DPM_TABLE_102__GraphicsLevel_0_CcPwrDynRm_MASK 0xffffffff
1812#define DPM_TABLE_102__GraphicsLevel_0_CcPwrDynRm__SHIFT 0x0
1813#define DPM_TABLE_103__GraphicsLevel_0_CcPwrDynRm1_MASK 0xffffffff
1814#define DPM_TABLE_103__GraphicsLevel_0_CcPwrDynRm1__SHIFT 0x0
1815#define DPM_TABLE_104__GraphicsLevel_0_EnabledForThrottle_MASK 0xff
1816#define DPM_TABLE_104__GraphicsLevel_0_EnabledForThrottle__SHIFT 0x0
1817#define DPM_TABLE_104__GraphicsLevel_0_EnabledForActivity_MASK 0xff00
1818#define DPM_TABLE_104__GraphicsLevel_0_EnabledForActivity__SHIFT 0x8
1819#define DPM_TABLE_104__GraphicsLevel_0_DisplayWatermark_MASK 0xff0000
1820#define DPM_TABLE_104__GraphicsLevel_0_DisplayWatermark__SHIFT 0x10
1821#define DPM_TABLE_104__GraphicsLevel_0_SclkDid_MASK 0xff000000
1822#define DPM_TABLE_104__GraphicsLevel_0_SclkDid__SHIFT 0x18
1823#define DPM_TABLE_105__GraphicsLevel_0_PowerThrottle_MASK 0xff
1824#define DPM_TABLE_105__GraphicsLevel_0_PowerThrottle__SHIFT 0x0
1825#define DPM_TABLE_105__GraphicsLevel_0_VoltageDownHyst_MASK 0xff00
1826#define DPM_TABLE_105__GraphicsLevel_0_VoltageDownHyst__SHIFT 0x8
1827#define DPM_TABLE_105__GraphicsLevel_0_DownHyst_MASK 0xff0000
1828#define DPM_TABLE_105__GraphicsLevel_0_DownHyst__SHIFT 0x10
1829#define DPM_TABLE_105__GraphicsLevel_0_UpHyst_MASK 0xff000000
1830#define DPM_TABLE_105__GraphicsLevel_0_UpHyst__SHIFT 0x18
1831#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Phases_MASK 0xff
1832#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Phases__SHIFT 0x0
1833#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_VddGfx_MASK 0xff00
1834#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_VddGfx__SHIFT 0x8
1835#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddci_MASK 0xff0000
1836#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddci__SHIFT 0x10
1837#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddc_MASK 0xff000000
1838#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddc__SHIFT 0x18
1839#define DPM_TABLE_107__GraphicsLevel_1_SclkFrequency_MASK 0xffffffff
1840#define DPM_TABLE_107__GraphicsLevel_1_SclkFrequency__SHIFT 0x0
1841#define DPM_TABLE_108__GraphicsLevel_1_ActivityLevel_MASK 0xffff
1842#define DPM_TABLE_108__GraphicsLevel_1_ActivityLevel__SHIFT 0x0
1843#define DPM_TABLE_108__GraphicsLevel_1_DeepSleepDivId_MASK 0xff0000
1844#define DPM_TABLE_108__GraphicsLevel_1_DeepSleepDivId__SHIFT 0x10
1845#define DPM_TABLE_108__GraphicsLevel_1_pcieDpmLevel_MASK 0xff000000
1846#define DPM_TABLE_108__GraphicsLevel_1_pcieDpmLevel__SHIFT 0x18
1847#define DPM_TABLE_109__GraphicsLevel_1_CgSpllFuncCntl3_MASK 0xffffffff
1848#define DPM_TABLE_109__GraphicsLevel_1_CgSpllFuncCntl3__SHIFT 0x0
1849#define DPM_TABLE_110__GraphicsLevel_1_CgSpllFuncCntl4_MASK 0xffffffff
1850#define DPM_TABLE_110__GraphicsLevel_1_CgSpllFuncCntl4__SHIFT 0x0
1851#define DPM_TABLE_111__GraphicsLevel_1_SpllSpreadSpectrum_MASK 0xffffffff
1852#define DPM_TABLE_111__GraphicsLevel_1_SpllSpreadSpectrum__SHIFT 0x0
1853#define DPM_TABLE_112__GraphicsLevel_1_SpllSpreadSpectrum2_MASK 0xffffffff
1854#define DPM_TABLE_112__GraphicsLevel_1_SpllSpreadSpectrum2__SHIFT 0x0
1855#define DPM_TABLE_113__GraphicsLevel_1_CcPwrDynRm_MASK 0xffffffff
1856#define DPM_TABLE_113__GraphicsLevel_1_CcPwrDynRm__SHIFT 0x0
1857#define DPM_TABLE_114__GraphicsLevel_1_CcPwrDynRm1_MASK 0xffffffff
1858#define DPM_TABLE_114__GraphicsLevel_1_CcPwrDynRm1__SHIFT 0x0
1859#define DPM_TABLE_115__GraphicsLevel_1_EnabledForThrottle_MASK 0xff
1860#define DPM_TABLE_115__GraphicsLevel_1_EnabledForThrottle__SHIFT 0x0
1861#define DPM_TABLE_115__GraphicsLevel_1_EnabledForActivity_MASK 0xff00
1862#define DPM_TABLE_115__GraphicsLevel_1_EnabledForActivity__SHIFT 0x8
1863#define DPM_TABLE_115__GraphicsLevel_1_DisplayWatermark_MASK 0xff0000
1864#define DPM_TABLE_115__GraphicsLevel_1_DisplayWatermark__SHIFT 0x10
1865#define DPM_TABLE_115__GraphicsLevel_1_SclkDid_MASK 0xff000000
1866#define DPM_TABLE_115__GraphicsLevel_1_SclkDid__SHIFT 0x18
1867#define DPM_TABLE_116__GraphicsLevel_1_PowerThrottle_MASK 0xff
1868#define DPM_TABLE_116__GraphicsLevel_1_PowerThrottle__SHIFT 0x0
1869#define DPM_TABLE_116__GraphicsLevel_1_VoltageDownHyst_MASK 0xff00
1870#define DPM_TABLE_116__GraphicsLevel_1_VoltageDownHyst__SHIFT 0x8
1871#define DPM_TABLE_116__GraphicsLevel_1_DownHyst_MASK 0xff0000
1872#define DPM_TABLE_116__GraphicsLevel_1_DownHyst__SHIFT 0x10
1873#define DPM_TABLE_116__GraphicsLevel_1_UpHyst_MASK 0xff000000
1874#define DPM_TABLE_116__GraphicsLevel_1_UpHyst__SHIFT 0x18
1875#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Phases_MASK 0xff
1876#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Phases__SHIFT 0x0
1877#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_VddGfx_MASK 0xff00
1878#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_VddGfx__SHIFT 0x8
1879#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddci_MASK 0xff0000
1880#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddci__SHIFT 0x10
1881#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddc_MASK 0xff000000
1882#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddc__SHIFT 0x18
1883#define DPM_TABLE_118__GraphicsLevel_2_SclkFrequency_MASK 0xffffffff
1884#define DPM_TABLE_118__GraphicsLevel_2_SclkFrequency__SHIFT 0x0
1885#define DPM_TABLE_119__GraphicsLevel_2_ActivityLevel_MASK 0xffff
1886#define DPM_TABLE_119__GraphicsLevel_2_ActivityLevel__SHIFT 0x0
1887#define DPM_TABLE_119__GraphicsLevel_2_DeepSleepDivId_MASK 0xff0000
1888#define DPM_TABLE_119__GraphicsLevel_2_DeepSleepDivId__SHIFT 0x10
1889#define DPM_TABLE_119__GraphicsLevel_2_pcieDpmLevel_MASK 0xff000000
1890#define DPM_TABLE_119__GraphicsLevel_2_pcieDpmLevel__SHIFT 0x18
1891#define DPM_TABLE_120__GraphicsLevel_2_CgSpllFuncCntl3_MASK 0xffffffff
1892#define DPM_TABLE_120__GraphicsLevel_2_CgSpllFuncCntl3__SHIFT 0x0
1893#define DPM_TABLE_121__GraphicsLevel_2_CgSpllFuncCntl4_MASK 0xffffffff
1894#define DPM_TABLE_121__GraphicsLevel_2_CgSpllFuncCntl4__SHIFT 0x0
1895#define DPM_TABLE_122__GraphicsLevel_2_SpllSpreadSpectrum_MASK 0xffffffff
1896#define DPM_TABLE_122__GraphicsLevel_2_SpllSpreadSpectrum__SHIFT 0x0
1897#define DPM_TABLE_123__GraphicsLevel_2_SpllSpreadSpectrum2_MASK 0xffffffff
1898#define DPM_TABLE_123__GraphicsLevel_2_SpllSpreadSpectrum2__SHIFT 0x0
1899#define DPM_TABLE_124__GraphicsLevel_2_CcPwrDynRm_MASK 0xffffffff
1900#define DPM_TABLE_124__GraphicsLevel_2_CcPwrDynRm__SHIFT 0x0
1901#define DPM_TABLE_125__GraphicsLevel_2_CcPwrDynRm1_MASK 0xffffffff
1902#define DPM_TABLE_125__GraphicsLevel_2_CcPwrDynRm1__SHIFT 0x0
1903#define DPM_TABLE_126__GraphicsLevel_2_EnabledForThrottle_MASK 0xff
1904#define DPM_TABLE_126__GraphicsLevel_2_EnabledForThrottle__SHIFT 0x0
1905#define DPM_TABLE_126__GraphicsLevel_2_EnabledForActivity_MASK 0xff00
1906#define DPM_TABLE_126__GraphicsLevel_2_EnabledForActivity__SHIFT 0x8
1907#define DPM_TABLE_126__GraphicsLevel_2_DisplayWatermark_MASK 0xff0000
1908#define DPM_TABLE_126__GraphicsLevel_2_DisplayWatermark__SHIFT 0x10
1909#define DPM_TABLE_126__GraphicsLevel_2_SclkDid_MASK 0xff000000
1910#define DPM_TABLE_126__GraphicsLevel_2_SclkDid__SHIFT 0x18
1911#define DPM_TABLE_127__GraphicsLevel_2_PowerThrottle_MASK 0xff
1912#define DPM_TABLE_127__GraphicsLevel_2_PowerThrottle__SHIFT 0x0
1913#define DPM_TABLE_127__GraphicsLevel_2_VoltageDownHyst_MASK 0xff00
1914#define DPM_TABLE_127__GraphicsLevel_2_VoltageDownHyst__SHIFT 0x8
1915#define DPM_TABLE_127__GraphicsLevel_2_DownHyst_MASK 0xff0000
1916#define DPM_TABLE_127__GraphicsLevel_2_DownHyst__SHIFT 0x10
1917#define DPM_TABLE_127__GraphicsLevel_2_UpHyst_MASK 0xff000000
1918#define DPM_TABLE_127__GraphicsLevel_2_UpHyst__SHIFT 0x18
#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_129__GraphicsLevel_3_SclkFrequency_MASK 0xffffffff
#define DPM_TABLE_129__GraphicsLevel_3_SclkFrequency__SHIFT 0x0
#define DPM_TABLE_130__GraphicsLevel_3_ActivityLevel_MASK 0xffff
#define DPM_TABLE_130__GraphicsLevel_3_ActivityLevel__SHIFT 0x0
#define DPM_TABLE_130__GraphicsLevel_3_DeepSleepDivId_MASK 0xff0000
#define DPM_TABLE_130__GraphicsLevel_3_DeepSleepDivId__SHIFT 0x10
#define DPM_TABLE_130__GraphicsLevel_3_pcieDpmLevel_MASK 0xff000000
#define DPM_TABLE_130__GraphicsLevel_3_pcieDpmLevel__SHIFT 0x18
#define DPM_TABLE_131__GraphicsLevel_3_CgSpllFuncCntl3_MASK 0xffffffff
#define DPM_TABLE_131__GraphicsLevel_3_CgSpllFuncCntl3__SHIFT 0x0
#define DPM_TABLE_132__GraphicsLevel_3_CgSpllFuncCntl4_MASK 0xffffffff
#define DPM_TABLE_132__GraphicsLevel_3_CgSpllFuncCntl4__SHIFT 0x0
#define DPM_TABLE_133__GraphicsLevel_3_SpllSpreadSpectrum_MASK 0xffffffff
#define DPM_TABLE_133__GraphicsLevel_3_SpllSpreadSpectrum__SHIFT 0x0
#define DPM_TABLE_134__GraphicsLevel_3_SpllSpreadSpectrum2_MASK 0xffffffff
#define DPM_TABLE_134__GraphicsLevel_3_SpllSpreadSpectrum2__SHIFT 0x0
#define DPM_TABLE_135__GraphicsLevel_3_CcPwrDynRm_MASK 0xffffffff
#define DPM_TABLE_135__GraphicsLevel_3_CcPwrDynRm__SHIFT 0x0
#define DPM_TABLE_136__GraphicsLevel_3_CcPwrDynRm1_MASK 0xffffffff
#define DPM_TABLE_136__GraphicsLevel_3_CcPwrDynRm1__SHIFT 0x0
#define DPM_TABLE_137__GraphicsLevel_3_EnabledForThrottle_MASK 0xff
#define DPM_TABLE_137__GraphicsLevel_3_EnabledForThrottle__SHIFT 0x0
#define DPM_TABLE_137__GraphicsLevel_3_EnabledForActivity_MASK 0xff00
#define DPM_TABLE_137__GraphicsLevel_3_EnabledForActivity__SHIFT 0x8
#define DPM_TABLE_137__GraphicsLevel_3_DisplayWatermark_MASK 0xff0000
#define DPM_TABLE_137__GraphicsLevel_3_DisplayWatermark__SHIFT 0x10
#define DPM_TABLE_137__GraphicsLevel_3_SclkDid_MASK 0xff000000
#define DPM_TABLE_137__GraphicsLevel_3_SclkDid__SHIFT 0x18
#define DPM_TABLE_138__GraphicsLevel_3_PowerThrottle_MASK 0xff
#define DPM_TABLE_138__GraphicsLevel_3_PowerThrottle__SHIFT 0x0
#define DPM_TABLE_138__GraphicsLevel_3_VoltageDownHyst_MASK 0xff00
#define DPM_TABLE_138__GraphicsLevel_3_VoltageDownHyst__SHIFT 0x8
#define DPM_TABLE_138__GraphicsLevel_3_DownHyst_MASK 0xff0000
#define DPM_TABLE_138__GraphicsLevel_3_DownHyst__SHIFT 0x10
#define DPM_TABLE_138__GraphicsLevel_3_UpHyst_MASK 0xff000000
#define DPM_TABLE_138__GraphicsLevel_3_UpHyst__SHIFT 0x18
#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_140__GraphicsLevel_4_SclkFrequency_MASK 0xffffffff
#define DPM_TABLE_140__GraphicsLevel_4_SclkFrequency__SHIFT 0x0
#define DPM_TABLE_141__GraphicsLevel_4_ActivityLevel_MASK 0xffff
#define DPM_TABLE_141__GraphicsLevel_4_ActivityLevel__SHIFT 0x0
#define DPM_TABLE_141__GraphicsLevel_4_DeepSleepDivId_MASK 0xff0000
#define DPM_TABLE_141__GraphicsLevel_4_DeepSleepDivId__SHIFT 0x10
#define DPM_TABLE_141__GraphicsLevel_4_pcieDpmLevel_MASK 0xff000000
#define DPM_TABLE_141__GraphicsLevel_4_pcieDpmLevel__SHIFT 0x18
#define DPM_TABLE_142__GraphicsLevel_4_CgSpllFuncCntl3_MASK 0xffffffff
#define DPM_TABLE_142__GraphicsLevel_4_CgSpllFuncCntl3__SHIFT 0x0
#define DPM_TABLE_143__GraphicsLevel_4_CgSpllFuncCntl4_MASK 0xffffffff
#define DPM_TABLE_143__GraphicsLevel_4_CgSpllFuncCntl4__SHIFT 0x0
#define DPM_TABLE_144__GraphicsLevel_4_SpllSpreadSpectrum_MASK 0xffffffff
#define DPM_TABLE_144__GraphicsLevel_4_SpllSpreadSpectrum__SHIFT 0x0
#define DPM_TABLE_145__GraphicsLevel_4_SpllSpreadSpectrum2_MASK 0xffffffff
#define DPM_TABLE_145__GraphicsLevel_4_SpllSpreadSpectrum2__SHIFT 0x0
#define DPM_TABLE_146__GraphicsLevel_4_CcPwrDynRm_MASK 0xffffffff
#define DPM_TABLE_146__GraphicsLevel_4_CcPwrDynRm__SHIFT 0x0
#define DPM_TABLE_147__GraphicsLevel_4_CcPwrDynRm1_MASK 0xffffffff
#define DPM_TABLE_147__GraphicsLevel_4_CcPwrDynRm1__SHIFT 0x0
#define DPM_TABLE_148__GraphicsLevel_4_EnabledForThrottle_MASK 0xff
#define DPM_TABLE_148__GraphicsLevel_4_EnabledForThrottle__SHIFT 0x0
#define DPM_TABLE_148__GraphicsLevel_4_EnabledForActivity_MASK 0xff00
#define DPM_TABLE_148__GraphicsLevel_4_EnabledForActivity__SHIFT 0x8
#define DPM_TABLE_148__GraphicsLevel_4_DisplayWatermark_MASK 0xff0000
#define DPM_TABLE_148__GraphicsLevel_4_DisplayWatermark__SHIFT 0x10
#define DPM_TABLE_148__GraphicsLevel_4_SclkDid_MASK 0xff000000
#define DPM_TABLE_148__GraphicsLevel_4_SclkDid__SHIFT 0x18
#define DPM_TABLE_149__GraphicsLevel_4_PowerThrottle_MASK 0xff
#define DPM_TABLE_149__GraphicsLevel_4_PowerThrottle__SHIFT 0x0
#define DPM_TABLE_149__GraphicsLevel_4_VoltageDownHyst_MASK 0xff00
#define DPM_TABLE_149__GraphicsLevel_4_VoltageDownHyst__SHIFT 0x8
#define DPM_TABLE_149__GraphicsLevel_4_DownHyst_MASK 0xff0000
#define DPM_TABLE_149__GraphicsLevel_4_DownHyst__SHIFT 0x10
#define DPM_TABLE_149__GraphicsLevel_4_UpHyst_MASK 0xff000000
#define DPM_TABLE_149__GraphicsLevel_4_UpHyst__SHIFT 0x18
#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_151__GraphicsLevel_5_SclkFrequency_MASK 0xffffffff
#define DPM_TABLE_151__GraphicsLevel_5_SclkFrequency__SHIFT 0x0
#define DPM_TABLE_152__GraphicsLevel_5_ActivityLevel_MASK 0xffff
#define DPM_TABLE_152__GraphicsLevel_5_ActivityLevel__SHIFT 0x0
#define DPM_TABLE_152__GraphicsLevel_5_DeepSleepDivId_MASK 0xff0000
#define DPM_TABLE_152__GraphicsLevel_5_DeepSleepDivId__SHIFT 0x10
#define DPM_TABLE_152__GraphicsLevel_5_pcieDpmLevel_MASK 0xff000000
#define DPM_TABLE_152__GraphicsLevel_5_pcieDpmLevel__SHIFT 0x18
#define DPM_TABLE_153__GraphicsLevel_5_CgSpllFuncCntl3_MASK 0xffffffff
#define DPM_TABLE_153__GraphicsLevel_5_CgSpllFuncCntl3__SHIFT 0x0
#define DPM_TABLE_154__GraphicsLevel_5_CgSpllFuncCntl4_MASK 0xffffffff
#define DPM_TABLE_154__GraphicsLevel_5_CgSpllFuncCntl4__SHIFT 0x0
#define DPM_TABLE_155__GraphicsLevel_5_SpllSpreadSpectrum_MASK 0xffffffff
#define DPM_TABLE_155__GraphicsLevel_5_SpllSpreadSpectrum__SHIFT 0x0
#define DPM_TABLE_156__GraphicsLevel_5_SpllSpreadSpectrum2_MASK 0xffffffff
#define DPM_TABLE_156__GraphicsLevel_5_SpllSpreadSpectrum2__SHIFT 0x0
#define DPM_TABLE_157__GraphicsLevel_5_CcPwrDynRm_MASK 0xffffffff
#define DPM_TABLE_157__GraphicsLevel_5_CcPwrDynRm__SHIFT 0x0
#define DPM_TABLE_158__GraphicsLevel_5_CcPwrDynRm1_MASK 0xffffffff
#define DPM_TABLE_158__GraphicsLevel_5_CcPwrDynRm1__SHIFT 0x0
#define DPM_TABLE_159__GraphicsLevel_5_EnabledForThrottle_MASK 0xff
#define DPM_TABLE_159__GraphicsLevel_5_EnabledForThrottle__SHIFT 0x0
#define DPM_TABLE_159__GraphicsLevel_5_EnabledForActivity_MASK 0xff00
#define DPM_TABLE_159__GraphicsLevel_5_EnabledForActivity__SHIFT 0x8
#define DPM_TABLE_159__GraphicsLevel_5_DisplayWatermark_MASK 0xff0000
#define DPM_TABLE_159__GraphicsLevel_5_DisplayWatermark__SHIFT 0x10
#define DPM_TABLE_159__GraphicsLevel_5_SclkDid_MASK 0xff000000
#define DPM_TABLE_159__GraphicsLevel_5_SclkDid__SHIFT 0x18
#define DPM_TABLE_160__GraphicsLevel_5_PowerThrottle_MASK 0xff
#define DPM_TABLE_160__GraphicsLevel_5_PowerThrottle__SHIFT 0x0
#define DPM_TABLE_160__GraphicsLevel_5_VoltageDownHyst_MASK 0xff00
#define DPM_TABLE_160__GraphicsLevel_5_VoltageDownHyst__SHIFT 0x8
#define DPM_TABLE_160__GraphicsLevel_5_DownHyst_MASK 0xff0000
#define DPM_TABLE_160__GraphicsLevel_5_DownHyst__SHIFT 0x10
#define DPM_TABLE_160__GraphicsLevel_5_UpHyst_MASK 0xff000000
#define DPM_TABLE_160__GraphicsLevel_5_UpHyst__SHIFT 0x18
#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_162__GraphicsLevel_6_SclkFrequency_MASK 0xffffffff
#define DPM_TABLE_162__GraphicsLevel_6_SclkFrequency__SHIFT 0x0
#define DPM_TABLE_163__GraphicsLevel_6_ActivityLevel_MASK 0xffff
#define DPM_TABLE_163__GraphicsLevel_6_ActivityLevel__SHIFT 0x0
#define DPM_TABLE_163__GraphicsLevel_6_DeepSleepDivId_MASK 0xff0000
#define DPM_TABLE_163__GraphicsLevel_6_DeepSleepDivId__SHIFT 0x10
#define DPM_TABLE_163__GraphicsLevel_6_pcieDpmLevel_MASK 0xff000000
#define DPM_TABLE_163__GraphicsLevel_6_pcieDpmLevel__SHIFT 0x18
#define DPM_TABLE_164__GraphicsLevel_6_CgSpllFuncCntl3_MASK 0xffffffff
#define DPM_TABLE_164__GraphicsLevel_6_CgSpllFuncCntl3__SHIFT 0x0
#define DPM_TABLE_165__GraphicsLevel_6_CgSpllFuncCntl4_MASK 0xffffffff
#define DPM_TABLE_165__GraphicsLevel_6_CgSpllFuncCntl4__SHIFT 0x0
#define DPM_TABLE_166__GraphicsLevel_6_SpllSpreadSpectrum_MASK 0xffffffff
#define DPM_TABLE_166__GraphicsLevel_6_SpllSpreadSpectrum__SHIFT 0x0
#define DPM_TABLE_167__GraphicsLevel_6_SpllSpreadSpectrum2_MASK 0xffffffff
#define DPM_TABLE_167__GraphicsLevel_6_SpllSpreadSpectrum2__SHIFT 0x0
#define DPM_TABLE_168__GraphicsLevel_6_CcPwrDynRm_MASK 0xffffffff
#define DPM_TABLE_168__GraphicsLevel_6_CcPwrDynRm__SHIFT 0x0
#define DPM_TABLE_169__GraphicsLevel_6_CcPwrDynRm1_MASK 0xffffffff
#define DPM_TABLE_169__GraphicsLevel_6_CcPwrDynRm1__SHIFT 0x0
#define DPM_TABLE_170__GraphicsLevel_6_EnabledForThrottle_MASK 0xff
#define DPM_TABLE_170__GraphicsLevel_6_EnabledForThrottle__SHIFT 0x0
#define DPM_TABLE_170__GraphicsLevel_6_EnabledForActivity_MASK 0xff00
#define DPM_TABLE_170__GraphicsLevel_6_EnabledForActivity__SHIFT 0x8
#define DPM_TABLE_170__GraphicsLevel_6_DisplayWatermark_MASK 0xff0000
#define DPM_TABLE_170__GraphicsLevel_6_DisplayWatermark__SHIFT 0x10
#define DPM_TABLE_170__GraphicsLevel_6_SclkDid_MASK 0xff000000
#define DPM_TABLE_170__GraphicsLevel_6_SclkDid__SHIFT 0x18
#define DPM_TABLE_171__GraphicsLevel_6_PowerThrottle_MASK 0xff
#define DPM_TABLE_171__GraphicsLevel_6_PowerThrottle__SHIFT 0x0
#define DPM_TABLE_171__GraphicsLevel_6_VoltageDownHyst_MASK 0xff00
#define DPM_TABLE_171__GraphicsLevel_6_VoltageDownHyst__SHIFT 0x8
#define DPM_TABLE_171__GraphicsLevel_6_DownHyst_MASK 0xff0000
#define DPM_TABLE_171__GraphicsLevel_6_DownHyst__SHIFT 0x10
#define DPM_TABLE_171__GraphicsLevel_6_UpHyst_MASK 0xff000000
#define DPM_TABLE_171__GraphicsLevel_6_UpHyst__SHIFT 0x18
#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_173__GraphicsLevel_7_SclkFrequency_MASK 0xffffffff
#define DPM_TABLE_173__GraphicsLevel_7_SclkFrequency__SHIFT 0x0
#define DPM_TABLE_174__GraphicsLevel_7_ActivityLevel_MASK 0xffff
#define DPM_TABLE_174__GraphicsLevel_7_ActivityLevel__SHIFT 0x0
#define DPM_TABLE_174__GraphicsLevel_7_DeepSleepDivId_MASK 0xff0000
#define DPM_TABLE_174__GraphicsLevel_7_DeepSleepDivId__SHIFT 0x10
#define DPM_TABLE_174__GraphicsLevel_7_pcieDpmLevel_MASK 0xff000000
#define DPM_TABLE_174__GraphicsLevel_7_pcieDpmLevel__SHIFT 0x18
#define DPM_TABLE_175__GraphicsLevel_7_CgSpllFuncCntl3_MASK 0xffffffff
#define DPM_TABLE_175__GraphicsLevel_7_CgSpllFuncCntl3__SHIFT 0x0
#define DPM_TABLE_176__GraphicsLevel_7_CgSpllFuncCntl4_MASK 0xffffffff
#define DPM_TABLE_176__GraphicsLevel_7_CgSpllFuncCntl4__SHIFT 0x0
#define DPM_TABLE_177__GraphicsLevel_7_SpllSpreadSpectrum_MASK 0xffffffff
#define DPM_TABLE_177__GraphicsLevel_7_SpllSpreadSpectrum__SHIFT 0x0
#define DPM_TABLE_178__GraphicsLevel_7_SpllSpreadSpectrum2_MASK 0xffffffff
#define DPM_TABLE_178__GraphicsLevel_7_SpllSpreadSpectrum2__SHIFT 0x0
#define DPM_TABLE_179__GraphicsLevel_7_CcPwrDynRm_MASK 0xffffffff
#define DPM_TABLE_179__GraphicsLevel_7_CcPwrDynRm__SHIFT 0x0
#define DPM_TABLE_180__GraphicsLevel_7_CcPwrDynRm1_MASK 0xffffffff
#define DPM_TABLE_180__GraphicsLevel_7_CcPwrDynRm1__SHIFT 0x0
#define DPM_TABLE_181__GraphicsLevel_7_EnabledForThrottle_MASK 0xff
#define DPM_TABLE_181__GraphicsLevel_7_EnabledForThrottle__SHIFT 0x0
#define DPM_TABLE_181__GraphicsLevel_7_EnabledForActivity_MASK 0xff00
#define DPM_TABLE_181__GraphicsLevel_7_EnabledForActivity__SHIFT 0x8
#define DPM_TABLE_181__GraphicsLevel_7_DisplayWatermark_MASK 0xff0000
#define DPM_TABLE_181__GraphicsLevel_7_DisplayWatermark__SHIFT 0x10
#define DPM_TABLE_181__GraphicsLevel_7_SclkDid_MASK 0xff000000
#define DPM_TABLE_181__GraphicsLevel_7_SclkDid__SHIFT 0x18
#define DPM_TABLE_182__GraphicsLevel_7_PowerThrottle_MASK 0xff
#define DPM_TABLE_182__GraphicsLevel_7_PowerThrottle__SHIFT 0x0
#define DPM_TABLE_182__GraphicsLevel_7_VoltageDownHyst_MASK 0xff00
#define DPM_TABLE_182__GraphicsLevel_7_VoltageDownHyst__SHIFT 0x8
#define DPM_TABLE_182__GraphicsLevel_7_DownHyst_MASK 0xff0000
#define DPM_TABLE_182__GraphicsLevel_7_DownHyst__SHIFT 0x10
#define DPM_TABLE_182__GraphicsLevel_7_UpHyst_MASK 0xff000000
#define DPM_TABLE_182__GraphicsLevel_7_UpHyst__SHIFT 0x18
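/*
 * Illustrative note, not part of the original header: each
 * DPM_TABLE_<n>__<Field>_MASK / __SHIFT pair selects one field packed
 * into 32-bit word <n> of the SMU DPM table, so a field is read as
 * (word & MASK) >> SHIFT. A minimal, hypothetical helper sketch:
 *
 *	static inline unsigned int dpm_field_get(unsigned int word,
 *						 unsigned int mask,
 *						 unsigned int shift)
 *	{
 *		return (word & mask) >> shift;
 *	}
 *
 * e.g. dpm_field_get(w, DPM_TABLE_182__GraphicsLevel_7_UpHyst_MASK,
 *		      DPM_TABLE_182__GraphicsLevel_7_UpHyst__SHIFT)
 * would extract the GraphicsLevel_7 UpHyst byte from word 182.
 */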
#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_184__MemoryACPILevel_MinMvdd_MASK 0xffffffff
#define DPM_TABLE_184__MemoryACPILevel_MinMvdd__SHIFT 0x0
#define DPM_TABLE_185__MemoryACPILevel_MclkFrequency_MASK 0xffffffff
#define DPM_TABLE_185__MemoryACPILevel_MclkFrequency__SHIFT 0x0
#define DPM_TABLE_186__MemoryACPILevel_EnabledForActivity_MASK 0xff
#define DPM_TABLE_186__MemoryACPILevel_EnabledForActivity__SHIFT 0x0
#define DPM_TABLE_186__MemoryACPILevel_EnabledForThrottle_MASK 0xff00
#define DPM_TABLE_186__MemoryACPILevel_EnabledForThrottle__SHIFT 0x8
#define DPM_TABLE_186__MemoryACPILevel_FreqRange_MASK 0xff0000
#define DPM_TABLE_186__MemoryACPILevel_FreqRange__SHIFT 0x10
#define DPM_TABLE_186__MemoryACPILevel_StutterEnable_MASK 0xff000000
#define DPM_TABLE_186__MemoryACPILevel_StutterEnable__SHIFT 0x18
#define DPM_TABLE_187__MemoryACPILevel_padding_MASK 0xff
#define DPM_TABLE_187__MemoryACPILevel_padding__SHIFT 0x0
#define DPM_TABLE_187__MemoryACPILevel_VoltageDownHyst_MASK 0xff00
#define DPM_TABLE_187__MemoryACPILevel_VoltageDownHyst__SHIFT 0x8
#define DPM_TABLE_187__MemoryACPILevel_DownHyst_MASK 0xff0000
#define DPM_TABLE_187__MemoryACPILevel_DownHyst__SHIFT 0x10
#define DPM_TABLE_187__MemoryACPILevel_UpHyst_MASK 0xff000000
#define DPM_TABLE_187__MemoryACPILevel_UpHyst__SHIFT 0x18
#define DPM_TABLE_188__MemoryACPILevel_MclkDivider_MASK 0xff
#define DPM_TABLE_188__MemoryACPILevel_MclkDivider__SHIFT 0x0
#define DPM_TABLE_188__MemoryACPILevel_DisplayWatermark_MASK 0xff00
#define DPM_TABLE_188__MemoryACPILevel_DisplayWatermark__SHIFT 0x8
#define DPM_TABLE_188__MemoryACPILevel_ActivityLevel_MASK 0xffff0000
#define DPM_TABLE_188__MemoryACPILevel_ActivityLevel__SHIFT 0x10
#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_190__MemoryLevel_0_MinMvdd_MASK 0xffffffff
#define DPM_TABLE_190__MemoryLevel_0_MinMvdd__SHIFT 0x0
#define DPM_TABLE_191__MemoryLevel_0_MclkFrequency_MASK 0xffffffff
#define DPM_TABLE_191__MemoryLevel_0_MclkFrequency__SHIFT 0x0
#define DPM_TABLE_192__MemoryLevel_0_EnabledForActivity_MASK 0xff
#define DPM_TABLE_192__MemoryLevel_0_EnabledForActivity__SHIFT 0x0
#define DPM_TABLE_192__MemoryLevel_0_EnabledForThrottle_MASK 0xff00
#define DPM_TABLE_192__MemoryLevel_0_EnabledForThrottle__SHIFT 0x8
#define DPM_TABLE_192__MemoryLevel_0_FreqRange_MASK 0xff0000
#define DPM_TABLE_192__MemoryLevel_0_FreqRange__SHIFT 0x10
#define DPM_TABLE_192__MemoryLevel_0_StutterEnable_MASK 0xff000000
#define DPM_TABLE_192__MemoryLevel_0_StutterEnable__SHIFT 0x18
#define DPM_TABLE_193__MemoryLevel_0_padding_MASK 0xff
#define DPM_TABLE_193__MemoryLevel_0_padding__SHIFT 0x0
#define DPM_TABLE_193__MemoryLevel_0_VoltageDownHyst_MASK 0xff00
#define DPM_TABLE_193__MemoryLevel_0_VoltageDownHyst__SHIFT 0x8
#define DPM_TABLE_193__MemoryLevel_0_DownHyst_MASK 0xff0000
#define DPM_TABLE_193__MemoryLevel_0_DownHyst__SHIFT 0x10
#define DPM_TABLE_193__MemoryLevel_0_UpHyst_MASK 0xff000000
#define DPM_TABLE_193__MemoryLevel_0_UpHyst__SHIFT 0x18
#define DPM_TABLE_194__MemoryLevel_0_MclkDivider_MASK 0xff
#define DPM_TABLE_194__MemoryLevel_0_MclkDivider__SHIFT 0x0
#define DPM_TABLE_194__MemoryLevel_0_DisplayWatermark_MASK 0xff00
#define DPM_TABLE_194__MemoryLevel_0_DisplayWatermark__SHIFT 0x8
#define DPM_TABLE_194__MemoryLevel_0_ActivityLevel_MASK 0xffff0000
#define DPM_TABLE_194__MemoryLevel_0_ActivityLevel__SHIFT 0x10
#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_196__MemoryLevel_1_MinMvdd_MASK 0xffffffff
#define DPM_TABLE_196__MemoryLevel_1_MinMvdd__SHIFT 0x0
#define DPM_TABLE_197__MemoryLevel_1_MclkFrequency_MASK 0xffffffff
#define DPM_TABLE_197__MemoryLevel_1_MclkFrequency__SHIFT 0x0
#define DPM_TABLE_198__MemoryLevel_1_EnabledForActivity_MASK 0xff
#define DPM_TABLE_198__MemoryLevel_1_EnabledForActivity__SHIFT 0x0
#define DPM_TABLE_198__MemoryLevel_1_EnabledForThrottle_MASK 0xff00
#define DPM_TABLE_198__MemoryLevel_1_EnabledForThrottle__SHIFT 0x8
#define DPM_TABLE_198__MemoryLevel_1_FreqRange_MASK 0xff0000
#define DPM_TABLE_198__MemoryLevel_1_FreqRange__SHIFT 0x10
#define DPM_TABLE_198__MemoryLevel_1_StutterEnable_MASK 0xff000000
#define DPM_TABLE_198__MemoryLevel_1_StutterEnable__SHIFT 0x18
#define DPM_TABLE_199__MemoryLevel_1_padding_MASK 0xff
#define DPM_TABLE_199__MemoryLevel_1_padding__SHIFT 0x0
#define DPM_TABLE_199__MemoryLevel_1_VoltageDownHyst_MASK 0xff00
#define DPM_TABLE_199__MemoryLevel_1_VoltageDownHyst__SHIFT 0x8
#define DPM_TABLE_199__MemoryLevel_1_DownHyst_MASK 0xff0000
#define DPM_TABLE_199__MemoryLevel_1_DownHyst__SHIFT 0x10
#define DPM_TABLE_199__MemoryLevel_1_UpHyst_MASK 0xff000000
#define DPM_TABLE_199__MemoryLevel_1_UpHyst__SHIFT 0x18
#define DPM_TABLE_200__MemoryLevel_1_MclkDivider_MASK 0xff
#define DPM_TABLE_200__MemoryLevel_1_MclkDivider__SHIFT 0x0
#define DPM_TABLE_200__MemoryLevel_1_DisplayWatermark_MASK 0xff00
#define DPM_TABLE_200__MemoryLevel_1_DisplayWatermark__SHIFT 0x8
#define DPM_TABLE_200__MemoryLevel_1_ActivityLevel_MASK 0xffff0000
#define DPM_TABLE_200__MemoryLevel_1_ActivityLevel__SHIFT 0x10
#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_202__MemoryLevel_2_MinMvdd_MASK 0xffffffff
#define DPM_TABLE_202__MemoryLevel_2_MinMvdd__SHIFT 0x0
#define DPM_TABLE_203__MemoryLevel_2_MclkFrequency_MASK 0xffffffff
#define DPM_TABLE_203__MemoryLevel_2_MclkFrequency__SHIFT 0x0
#define DPM_TABLE_204__MemoryLevel_2_EnabledForActivity_MASK 0xff
#define DPM_TABLE_204__MemoryLevel_2_EnabledForActivity__SHIFT 0x0
#define DPM_TABLE_204__MemoryLevel_2_EnabledForThrottle_MASK 0xff00
#define DPM_TABLE_204__MemoryLevel_2_EnabledForThrottle__SHIFT 0x8
#define DPM_TABLE_204__MemoryLevel_2_FreqRange_MASK 0xff0000
#define DPM_TABLE_204__MemoryLevel_2_FreqRange__SHIFT 0x10
#define DPM_TABLE_204__MemoryLevel_2_StutterEnable_MASK 0xff000000
#define DPM_TABLE_204__MemoryLevel_2_StutterEnable__SHIFT 0x18
#define DPM_TABLE_205__MemoryLevel_2_padding_MASK 0xff
#define DPM_TABLE_205__MemoryLevel_2_padding__SHIFT 0x0
#define DPM_TABLE_205__MemoryLevel_2_VoltageDownHyst_MASK 0xff00
#define DPM_TABLE_205__MemoryLevel_2_VoltageDownHyst__SHIFT 0x8
#define DPM_TABLE_205__MemoryLevel_2_DownHyst_MASK 0xff0000
#define DPM_TABLE_205__MemoryLevel_2_DownHyst__SHIFT 0x10
#define DPM_TABLE_205__MemoryLevel_2_UpHyst_MASK 0xff000000
#define DPM_TABLE_205__MemoryLevel_2_UpHyst__SHIFT 0x18
#define DPM_TABLE_206__MemoryLevel_2_MclkDivider_MASK 0xff
#define DPM_TABLE_206__MemoryLevel_2_MclkDivider__SHIFT 0x0
#define DPM_TABLE_206__MemoryLevel_2_DisplayWatermark_MASK 0xff00
#define DPM_TABLE_206__MemoryLevel_2_DisplayWatermark__SHIFT 0x8
#define DPM_TABLE_206__MemoryLevel_2_ActivityLevel_MASK 0xffff0000
#define DPM_TABLE_206__MemoryLevel_2_ActivityLevel__SHIFT 0x10
#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_208__MemoryLevel_3_MinMvdd_MASK 0xffffffff
#define DPM_TABLE_208__MemoryLevel_3_MinMvdd__SHIFT 0x0
#define DPM_TABLE_209__MemoryLevel_3_MclkFrequency_MASK 0xffffffff
#define DPM_TABLE_209__MemoryLevel_3_MclkFrequency__SHIFT 0x0
#define DPM_TABLE_210__MemoryLevel_3_EnabledForActivity_MASK 0xff
#define DPM_TABLE_210__MemoryLevel_3_EnabledForActivity__SHIFT 0x0
#define DPM_TABLE_210__MemoryLevel_3_EnabledForThrottle_MASK 0xff00
#define DPM_TABLE_210__MemoryLevel_3_EnabledForThrottle__SHIFT 0x8
#define DPM_TABLE_210__MemoryLevel_3_FreqRange_MASK 0xff0000
#define DPM_TABLE_210__MemoryLevel_3_FreqRange__SHIFT 0x10
#define DPM_TABLE_210__MemoryLevel_3_StutterEnable_MASK 0xff000000
#define DPM_TABLE_210__MemoryLevel_3_StutterEnable__SHIFT 0x18
#define DPM_TABLE_211__MemoryLevel_3_padding_MASK 0xff
#define DPM_TABLE_211__MemoryLevel_3_padding__SHIFT 0x0
#define DPM_TABLE_211__MemoryLevel_3_VoltageDownHyst_MASK 0xff00
#define DPM_TABLE_211__MemoryLevel_3_VoltageDownHyst__SHIFT 0x8
#define DPM_TABLE_211__MemoryLevel_3_DownHyst_MASK 0xff0000
#define DPM_TABLE_211__MemoryLevel_3_DownHyst__SHIFT 0x10
#define DPM_TABLE_211__MemoryLevel_3_UpHyst_MASK 0xff000000
#define DPM_TABLE_211__MemoryLevel_3_UpHyst__SHIFT 0x18
#define DPM_TABLE_212__MemoryLevel_3_MclkDivider_MASK 0xff
#define DPM_TABLE_212__MemoryLevel_3_MclkDivider__SHIFT 0x0
#define DPM_TABLE_212__MemoryLevel_3_DisplayWatermark_MASK 0xff00
#define DPM_TABLE_212__MemoryLevel_3_DisplayWatermark__SHIFT 0x8
#define DPM_TABLE_212__MemoryLevel_3_ActivityLevel_MASK 0xffff0000
#define DPM_TABLE_212__MemoryLevel_3_ActivityLevel__SHIFT 0x10
#define DPM_TABLE_213__LinkLevel_0_SPC_MASK 0xff
#define DPM_TABLE_213__LinkLevel_0_SPC__SHIFT 0x0
#define DPM_TABLE_213__LinkLevel_0_EnabledForActivity_MASK 0xff00
#define DPM_TABLE_213__LinkLevel_0_EnabledForActivity__SHIFT 0x8
#define DPM_TABLE_213__LinkLevel_0_PcieLaneCount_MASK 0xff0000
#define DPM_TABLE_213__LinkLevel_0_PcieLaneCount__SHIFT 0x10
#define DPM_TABLE_213__LinkLevel_0_PcieGenSpeed_MASK 0xff000000
#define DPM_TABLE_213__LinkLevel_0_PcieGenSpeed__SHIFT 0x18
#define DPM_TABLE_214__LinkLevel_0_DownThreshold_MASK 0xffffffff
#define DPM_TABLE_214__LinkLevel_0_DownThreshold__SHIFT 0x0
#define DPM_TABLE_215__LinkLevel_0_UpThreshold_MASK 0xffffffff
#define DPM_TABLE_215__LinkLevel_0_UpThreshold__SHIFT 0x0
#define DPM_TABLE_216__LinkLevel_0_Reserved_MASK 0xffffffff
#define DPM_TABLE_216__LinkLevel_0_Reserved__SHIFT 0x0
#define DPM_TABLE_217__LinkLevel_1_SPC_MASK 0xff
#define DPM_TABLE_217__LinkLevel_1_SPC__SHIFT 0x0
#define DPM_TABLE_217__LinkLevel_1_EnabledForActivity_MASK 0xff00
#define DPM_TABLE_217__LinkLevel_1_EnabledForActivity__SHIFT 0x8
#define DPM_TABLE_217__LinkLevel_1_PcieLaneCount_MASK 0xff0000
#define DPM_TABLE_217__LinkLevel_1_PcieLaneCount__SHIFT 0x10
#define DPM_TABLE_217__LinkLevel_1_PcieGenSpeed_MASK 0xff000000
#define DPM_TABLE_217__LinkLevel_1_PcieGenSpeed__SHIFT 0x18
#define DPM_TABLE_218__LinkLevel_1_DownThreshold_MASK 0xffffffff
#define DPM_TABLE_218__LinkLevel_1_DownThreshold__SHIFT 0x0
#define DPM_TABLE_219__LinkLevel_1_UpThreshold_MASK 0xffffffff
#define DPM_TABLE_219__LinkLevel_1_UpThreshold__SHIFT 0x0
#define DPM_TABLE_220__LinkLevel_1_Reserved_MASK 0xffffffff
#define DPM_TABLE_220__LinkLevel_1_Reserved__SHIFT 0x0
#define DPM_TABLE_221__LinkLevel_2_SPC_MASK 0xff
#define DPM_TABLE_221__LinkLevel_2_SPC__SHIFT 0x0
#define DPM_TABLE_221__LinkLevel_2_EnabledForActivity_MASK 0xff00
#define DPM_TABLE_221__LinkLevel_2_EnabledForActivity__SHIFT 0x8
#define DPM_TABLE_221__LinkLevel_2_PcieLaneCount_MASK 0xff0000
#define DPM_TABLE_221__LinkLevel_2_PcieLaneCount__SHIFT 0x10
#define DPM_TABLE_221__LinkLevel_2_PcieGenSpeed_MASK 0xff000000
#define DPM_TABLE_221__LinkLevel_2_PcieGenSpeed__SHIFT 0x18
#define DPM_TABLE_222__LinkLevel_2_DownThreshold_MASK 0xffffffff
#define DPM_TABLE_222__LinkLevel_2_DownThreshold__SHIFT 0x0
#define DPM_TABLE_223__LinkLevel_2_UpThreshold_MASK 0xffffffff
#define DPM_TABLE_223__LinkLevel_2_UpThreshold__SHIFT 0x0
#define DPM_TABLE_224__LinkLevel_2_Reserved_MASK 0xffffffff
#define DPM_TABLE_224__LinkLevel_2_Reserved__SHIFT 0x0
#define DPM_TABLE_225__LinkLevel_3_SPC_MASK 0xff
#define DPM_TABLE_225__LinkLevel_3_SPC__SHIFT 0x0
#define DPM_TABLE_225__LinkLevel_3_EnabledForActivity_MASK 0xff00
#define DPM_TABLE_225__LinkLevel_3_EnabledForActivity__SHIFT 0x8
#define DPM_TABLE_225__LinkLevel_3_PcieLaneCount_MASK 0xff0000
#define DPM_TABLE_225__LinkLevel_3_PcieLaneCount__SHIFT 0x10
#define DPM_TABLE_225__LinkLevel_3_PcieGenSpeed_MASK 0xff000000
#define DPM_TABLE_225__LinkLevel_3_PcieGenSpeed__SHIFT 0x18
#define DPM_TABLE_226__LinkLevel_3_DownThreshold_MASK 0xffffffff
#define DPM_TABLE_226__LinkLevel_3_DownThreshold__SHIFT 0x0
#define DPM_TABLE_227__LinkLevel_3_UpThreshold_MASK 0xffffffff
#define DPM_TABLE_227__LinkLevel_3_UpThreshold__SHIFT 0x0
#define DPM_TABLE_228__LinkLevel_3_Reserved_MASK 0xffffffff
#define DPM_TABLE_228__LinkLevel_3_Reserved__SHIFT 0x0
#define DPM_TABLE_229__LinkLevel_4_SPC_MASK 0xff
#define DPM_TABLE_229__LinkLevel_4_SPC__SHIFT 0x0
#define DPM_TABLE_229__LinkLevel_4_EnabledForActivity_MASK 0xff00
#define DPM_TABLE_229__LinkLevel_4_EnabledForActivity__SHIFT 0x8
#define DPM_TABLE_229__LinkLevel_4_PcieLaneCount_MASK 0xff0000
#define DPM_TABLE_229__LinkLevel_4_PcieLaneCount__SHIFT 0x10
#define DPM_TABLE_229__LinkLevel_4_PcieGenSpeed_MASK 0xff000000
#define DPM_TABLE_229__LinkLevel_4_PcieGenSpeed__SHIFT 0x18
#define DPM_TABLE_230__LinkLevel_4_DownThreshold_MASK 0xffffffff
#define DPM_TABLE_230__LinkLevel_4_DownThreshold__SHIFT 0x0
#define DPM_TABLE_231__LinkLevel_4_UpThreshold_MASK 0xffffffff
#define DPM_TABLE_231__LinkLevel_4_UpThreshold__SHIFT 0x0
#define DPM_TABLE_232__LinkLevel_4_Reserved_MASK 0xffffffff
#define DPM_TABLE_232__LinkLevel_4_Reserved__SHIFT 0x0
#define DPM_TABLE_233__LinkLevel_5_SPC_MASK 0xff
#define DPM_TABLE_233__LinkLevel_5_SPC__SHIFT 0x0
#define DPM_TABLE_233__LinkLevel_5_EnabledForActivity_MASK 0xff00
#define DPM_TABLE_233__LinkLevel_5_EnabledForActivity__SHIFT 0x8
#define DPM_TABLE_233__LinkLevel_5_PcieLaneCount_MASK 0xff0000
#define DPM_TABLE_233__LinkLevel_5_PcieLaneCount__SHIFT 0x10
#define DPM_TABLE_233__LinkLevel_5_PcieGenSpeed_MASK 0xff000000
#define DPM_TABLE_233__LinkLevel_5_PcieGenSpeed__SHIFT 0x18
#define DPM_TABLE_234__LinkLevel_5_DownThreshold_MASK 0xffffffff
#define DPM_TABLE_234__LinkLevel_5_DownThreshold__SHIFT 0x0
#define DPM_TABLE_235__LinkLevel_5_UpThreshold_MASK 0xffffffff
#define DPM_TABLE_235__LinkLevel_5_UpThreshold__SHIFT 0x0
#define DPM_TABLE_236__LinkLevel_5_Reserved_MASK 0xffffffff
#define DPM_TABLE_236__LinkLevel_5_Reserved__SHIFT 0x0
#define DPM_TABLE_237__LinkLevel_6_SPC_MASK 0xff
#define DPM_TABLE_237__LinkLevel_6_SPC__SHIFT 0x0
#define DPM_TABLE_237__LinkLevel_6_EnabledForActivity_MASK 0xff00
#define DPM_TABLE_237__LinkLevel_6_EnabledForActivity__SHIFT 0x8
#define DPM_TABLE_237__LinkLevel_6_PcieLaneCount_MASK 0xff0000
#define DPM_TABLE_237__LinkLevel_6_PcieLaneCount__SHIFT 0x10
#define DPM_TABLE_237__LinkLevel_6_PcieGenSpeed_MASK 0xff000000
#define DPM_TABLE_237__LinkLevel_6_PcieGenSpeed__SHIFT 0x18
#define DPM_TABLE_238__LinkLevel_6_DownThreshold_MASK 0xffffffff
#define DPM_TABLE_238__LinkLevel_6_DownThreshold__SHIFT 0x0
#define DPM_TABLE_239__LinkLevel_6_UpThreshold_MASK 0xffffffff
#define DPM_TABLE_239__LinkLevel_6_UpThreshold__SHIFT 0x0
#define DPM_TABLE_240__LinkLevel_6_Reserved_MASK 0xffffffff
#define DPM_TABLE_240__LinkLevel_6_Reserved__SHIFT 0x0
#define DPM_TABLE_241__LinkLevel_7_SPC_MASK 0xff
#define DPM_TABLE_241__LinkLevel_7_SPC__SHIFT 0x0
#define DPM_TABLE_241__LinkLevel_7_EnabledForActivity_MASK 0xff00
#define DPM_TABLE_241__LinkLevel_7_EnabledForActivity__SHIFT 0x8
#define DPM_TABLE_241__LinkLevel_7_PcieLaneCount_MASK 0xff0000
#define DPM_TABLE_241__LinkLevel_7_PcieLaneCount__SHIFT 0x10
#define DPM_TABLE_241__LinkLevel_7_PcieGenSpeed_MASK 0xff000000
#define DPM_TABLE_241__LinkLevel_7_PcieGenSpeed__SHIFT 0x18
#define DPM_TABLE_242__LinkLevel_7_DownThreshold_MASK 0xffffffff
#define DPM_TABLE_242__LinkLevel_7_DownThreshold__SHIFT 0x0
#define DPM_TABLE_243__LinkLevel_7_UpThreshold_MASK 0xffffffff
#define DPM_TABLE_243__LinkLevel_7_UpThreshold__SHIFT 0x0
#define DPM_TABLE_244__LinkLevel_7_Reserved_MASK 0xffffffff
#define DPM_TABLE_244__LinkLevel_7_Reserved__SHIFT 0x0
#define DPM_TABLE_245__ACPILevel_Flags_MASK 0xffffffff
#define DPM_TABLE_245__ACPILevel_Flags__SHIFT 0x0
#define DPM_TABLE_246__ACPILevel_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_246__ACPILevel_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_246__ACPILevel_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_246__ACPILevel_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_247__ACPILevel_SclkFrequency_MASK 0xffffffff
#define DPM_TABLE_247__ACPILevel_SclkFrequency__SHIFT 0x0
#define DPM_TABLE_248__ACPILevel_padding_MASK 0xff
#define DPM_TABLE_248__ACPILevel_padding__SHIFT 0x0
#define DPM_TABLE_248__ACPILevel_DeepSleepDivId_MASK 0xff00
#define DPM_TABLE_248__ACPILevel_DeepSleepDivId__SHIFT 0x8
#define DPM_TABLE_248__ACPILevel_DisplayWatermark_MASK 0xff0000
#define DPM_TABLE_248__ACPILevel_DisplayWatermark__SHIFT 0x10
#define DPM_TABLE_248__ACPILevel_SclkDid_MASK 0xff000000
#define DPM_TABLE_248__ACPILevel_SclkDid__SHIFT 0x18
#define DPM_TABLE_249__ACPILevel_CgSpllFuncCntl_MASK 0xffffffff
#define DPM_TABLE_249__ACPILevel_CgSpllFuncCntl__SHIFT 0x0
#define DPM_TABLE_250__ACPILevel_CgSpllFuncCntl2_MASK 0xffffffff
#define DPM_TABLE_250__ACPILevel_CgSpllFuncCntl2__SHIFT 0x0
#define DPM_TABLE_251__ACPILevel_CgSpllFuncCntl3_MASK 0xffffffff
#define DPM_TABLE_251__ACPILevel_CgSpllFuncCntl3__SHIFT 0x0
#define DPM_TABLE_252__ACPILevel_CgSpllFuncCntl4_MASK 0xffffffff
#define DPM_TABLE_252__ACPILevel_CgSpllFuncCntl4__SHIFT 0x0
#define DPM_TABLE_253__ACPILevel_SpllSpreadSpectrum_MASK 0xffffffff
#define DPM_TABLE_253__ACPILevel_SpllSpreadSpectrum__SHIFT 0x0
#define DPM_TABLE_254__ACPILevel_SpllSpreadSpectrum2_MASK 0xffffffff
#define DPM_TABLE_254__ACPILevel_SpllSpreadSpectrum2__SHIFT 0x0
#define DPM_TABLE_255__ACPILevel_CcPwrDynRm_MASK 0xffffffff
#define DPM_TABLE_255__ACPILevel_CcPwrDynRm__SHIFT 0x0
#define DPM_TABLE_256__ACPILevel_CcPwrDynRm1_MASK 0xffffffff
#define DPM_TABLE_256__ACPILevel_CcPwrDynRm1__SHIFT 0x0
#define DPM_TABLE_257__UvdLevel_0_VclkFrequency_MASK 0xffffffff
#define DPM_TABLE_257__UvdLevel_0_VclkFrequency__SHIFT 0x0
#define DPM_TABLE_258__UvdLevel_0_DclkFrequency_MASK 0xffffffff
#define DPM_TABLE_258__UvdLevel_0_DclkFrequency__SHIFT 0x0
#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_259__UvdLevel_0_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_259__UvdLevel_0_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_260__UvdLevel_0_padding_1_MASK 0xff
#define DPM_TABLE_260__UvdLevel_0_padding_1__SHIFT 0x0
#define DPM_TABLE_260__UvdLevel_0_padding_0_MASK 0xff00
#define DPM_TABLE_260__UvdLevel_0_padding_0__SHIFT 0x8
#define DPM_TABLE_260__UvdLevel_0_DclkDivider_MASK 0xff0000
#define DPM_TABLE_260__UvdLevel_0_DclkDivider__SHIFT 0x10
#define DPM_TABLE_260__UvdLevel_0_VclkDivider_MASK 0xff000000
#define DPM_TABLE_260__UvdLevel_0_VclkDivider__SHIFT 0x18
#define DPM_TABLE_261__UvdLevel_1_VclkFrequency_MASK 0xffffffff
#define DPM_TABLE_261__UvdLevel_1_VclkFrequency__SHIFT 0x0
#define DPM_TABLE_262__UvdLevel_1_DclkFrequency_MASK 0xffffffff
#define DPM_TABLE_262__UvdLevel_1_DclkFrequency__SHIFT 0x0
#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_263__UvdLevel_1_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_263__UvdLevel_1_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_264__UvdLevel_1_padding_1_MASK 0xff
#define DPM_TABLE_264__UvdLevel_1_padding_1__SHIFT 0x0
#define DPM_TABLE_264__UvdLevel_1_padding_0_MASK 0xff00
#define DPM_TABLE_264__UvdLevel_1_padding_0__SHIFT 0x8
#define DPM_TABLE_264__UvdLevel_1_DclkDivider_MASK 0xff0000
#define DPM_TABLE_264__UvdLevel_1_DclkDivider__SHIFT 0x10
#define DPM_TABLE_264__UvdLevel_1_VclkDivider_MASK 0xff000000
#define DPM_TABLE_264__UvdLevel_1_VclkDivider__SHIFT 0x18
#define DPM_TABLE_265__UvdLevel_2_VclkFrequency_MASK 0xffffffff
#define DPM_TABLE_265__UvdLevel_2_VclkFrequency__SHIFT 0x0
#define DPM_TABLE_266__UvdLevel_2_DclkFrequency_MASK 0xffffffff
#define DPM_TABLE_266__UvdLevel_2_DclkFrequency__SHIFT 0x0
#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_267__UvdLevel_2_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_267__UvdLevel_2_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_268__UvdLevel_2_padding_1_MASK 0xff
#define DPM_TABLE_268__UvdLevel_2_padding_1__SHIFT 0x0
#define DPM_TABLE_268__UvdLevel_2_padding_0_MASK 0xff00
#define DPM_TABLE_268__UvdLevel_2_padding_0__SHIFT 0x8
#define DPM_TABLE_268__UvdLevel_2_DclkDivider_MASK 0xff0000
#define DPM_TABLE_268__UvdLevel_2_DclkDivider__SHIFT 0x10
#define DPM_TABLE_268__UvdLevel_2_VclkDivider_MASK 0xff000000
#define DPM_TABLE_268__UvdLevel_2_VclkDivider__SHIFT 0x18
#define DPM_TABLE_269__UvdLevel_3_VclkFrequency_MASK 0xffffffff
#define DPM_TABLE_269__UvdLevel_3_VclkFrequency__SHIFT 0x0
#define DPM_TABLE_270__UvdLevel_3_DclkFrequency_MASK 0xffffffff
#define DPM_TABLE_270__UvdLevel_3_DclkFrequency__SHIFT 0x0
#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_271__UvdLevel_3_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_271__UvdLevel_3_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_272__UvdLevel_3_padding_1_MASK 0xff
#define DPM_TABLE_272__UvdLevel_3_padding_1__SHIFT 0x0
#define DPM_TABLE_272__UvdLevel_3_padding_0_MASK 0xff00
#define DPM_TABLE_272__UvdLevel_3_padding_0__SHIFT 0x8
#define DPM_TABLE_272__UvdLevel_3_DclkDivider_MASK 0xff0000
#define DPM_TABLE_272__UvdLevel_3_DclkDivider__SHIFT 0x10
#define DPM_TABLE_272__UvdLevel_3_VclkDivider_MASK 0xff000000
#define DPM_TABLE_272__UvdLevel_3_VclkDivider__SHIFT 0x18
#define DPM_TABLE_273__UvdLevel_4_VclkFrequency_MASK 0xffffffff
#define DPM_TABLE_273__UvdLevel_4_VclkFrequency__SHIFT 0x0
#define DPM_TABLE_274__UvdLevel_4_DclkFrequency_MASK 0xffffffff
#define DPM_TABLE_274__UvdLevel_4_DclkFrequency__SHIFT 0x0
#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_275__UvdLevel_4_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_275__UvdLevel_4_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_276__UvdLevel_4_padding_1_MASK 0xff
#define DPM_TABLE_276__UvdLevel_4_padding_1__SHIFT 0x0
#define DPM_TABLE_276__UvdLevel_4_padding_0_MASK 0xff00
#define DPM_TABLE_276__UvdLevel_4_padding_0__SHIFT 0x8
#define DPM_TABLE_276__UvdLevel_4_DclkDivider_MASK 0xff0000
#define DPM_TABLE_276__UvdLevel_4_DclkDivider__SHIFT 0x10
#define DPM_TABLE_276__UvdLevel_4_VclkDivider_MASK 0xff000000
#define DPM_TABLE_276__UvdLevel_4_VclkDivider__SHIFT 0x18
#define DPM_TABLE_277__UvdLevel_5_VclkFrequency_MASK 0xffffffff
#define DPM_TABLE_277__UvdLevel_5_VclkFrequency__SHIFT 0x0
#define DPM_TABLE_278__UvdLevel_5_DclkFrequency_MASK 0xffffffff
#define DPM_TABLE_278__UvdLevel_5_DclkFrequency__SHIFT 0x0
#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_279__UvdLevel_5_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_279__UvdLevel_5_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_280__UvdLevel_5_padding_1_MASK 0xff
#define DPM_TABLE_280__UvdLevel_5_padding_1__SHIFT 0x0
#define DPM_TABLE_280__UvdLevel_5_padding_0_MASK 0xff00
#define DPM_TABLE_280__UvdLevel_5_padding_0__SHIFT 0x8
#define DPM_TABLE_280__UvdLevel_5_DclkDivider_MASK 0xff0000
#define DPM_TABLE_280__UvdLevel_5_DclkDivider__SHIFT 0x10
#define DPM_TABLE_280__UvdLevel_5_VclkDivider_MASK 0xff000000
#define DPM_TABLE_280__UvdLevel_5_VclkDivider__SHIFT 0x18
#define DPM_TABLE_281__UvdLevel_6_VclkFrequency_MASK 0xffffffff
#define DPM_TABLE_281__UvdLevel_6_VclkFrequency__SHIFT 0x0
#define DPM_TABLE_282__UvdLevel_6_DclkFrequency_MASK 0xffffffff
#define DPM_TABLE_282__UvdLevel_6_DclkFrequency__SHIFT 0x0
#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_283__UvdLevel_6_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_283__UvdLevel_6_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_284__UvdLevel_6_padding_1_MASK 0xff
#define DPM_TABLE_284__UvdLevel_6_padding_1__SHIFT 0x0
#define DPM_TABLE_284__UvdLevel_6_padding_0_MASK 0xff00
#define DPM_TABLE_284__UvdLevel_6_padding_0__SHIFT 0x8
#define DPM_TABLE_284__UvdLevel_6_DclkDivider_MASK 0xff0000
#define DPM_TABLE_284__UvdLevel_6_DclkDivider__SHIFT 0x10
#define DPM_TABLE_284__UvdLevel_6_VclkDivider_MASK 0xff000000
#define DPM_TABLE_284__UvdLevel_6_VclkDivider__SHIFT 0x18
#define DPM_TABLE_285__UvdLevel_7_VclkFrequency_MASK 0xffffffff
#define DPM_TABLE_285__UvdLevel_7_VclkFrequency__SHIFT 0x0
#define DPM_TABLE_286__UvdLevel_7_DclkFrequency_MASK 0xffffffff
#define DPM_TABLE_286__UvdLevel_7_DclkFrequency__SHIFT 0x0
#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_287__UvdLevel_7_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_287__UvdLevel_7_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_288__UvdLevel_7_padding_1_MASK 0xff
#define DPM_TABLE_288__UvdLevel_7_padding_1__SHIFT 0x0
#define DPM_TABLE_288__UvdLevel_7_padding_0_MASK 0xff00
#define DPM_TABLE_288__UvdLevel_7_padding_0__SHIFT 0x8
#define DPM_TABLE_288__UvdLevel_7_DclkDivider_MASK 0xff0000
#define DPM_TABLE_288__UvdLevel_7_DclkDivider__SHIFT 0x10
#define DPM_TABLE_288__UvdLevel_7_VclkDivider_MASK 0xff000000
#define DPM_TABLE_288__UvdLevel_7_VclkDivider__SHIFT 0x18
#define DPM_TABLE_289__VceLevel_0_Frequency_MASK 0xffffffff
#define DPM_TABLE_289__VceLevel_0_Frequency__SHIFT 0x0
#define DPM_TABLE_290__VceLevel_0_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_290__VceLevel_0_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_290__VceLevel_0_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_290__VceLevel_0_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_291__VceLevel_0_padding_2_MASK 0xff
#define DPM_TABLE_291__VceLevel_0_padding_2__SHIFT 0x0
#define DPM_TABLE_291__VceLevel_0_padding_1_MASK 0xff00
#define DPM_TABLE_291__VceLevel_0_padding_1__SHIFT 0x8
#define DPM_TABLE_291__VceLevel_0_padding_0_MASK 0xff0000
#define DPM_TABLE_291__VceLevel_0_padding_0__SHIFT 0x10
#define DPM_TABLE_291__VceLevel_0_Divider_MASK 0xff000000
#define DPM_TABLE_291__VceLevel_0_Divider__SHIFT 0x18
#define DPM_TABLE_292__VceLevel_1_Frequency_MASK 0xffffffff
#define DPM_TABLE_292__VceLevel_1_Frequency__SHIFT 0x0
#define DPM_TABLE_293__VceLevel_1_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_293__VceLevel_1_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_293__VceLevel_1_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_293__VceLevel_1_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_294__VceLevel_1_padding_2_MASK 0xff
#define DPM_TABLE_294__VceLevel_1_padding_2__SHIFT 0x0
#define DPM_TABLE_294__VceLevel_1_padding_1_MASK 0xff00
#define DPM_TABLE_294__VceLevel_1_padding_1__SHIFT 0x8
#define DPM_TABLE_294__VceLevel_1_padding_0_MASK 0xff0000
#define DPM_TABLE_294__VceLevel_1_padding_0__SHIFT 0x10
#define DPM_TABLE_294__VceLevel_1_Divider_MASK 0xff000000
#define DPM_TABLE_294__VceLevel_1_Divider__SHIFT 0x18
#define DPM_TABLE_295__VceLevel_2_Frequency_MASK 0xffffffff
#define DPM_TABLE_295__VceLevel_2_Frequency__SHIFT 0x0
#define DPM_TABLE_296__VceLevel_2_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_296__VceLevel_2_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_296__VceLevel_2_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_296__VceLevel_2_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_297__VceLevel_2_padding_2_MASK 0xff
#define DPM_TABLE_297__VceLevel_2_padding_2__SHIFT 0x0
#define DPM_TABLE_297__VceLevel_2_padding_1_MASK 0xff00
#define DPM_TABLE_297__VceLevel_2_padding_1__SHIFT 0x8
#define DPM_TABLE_297__VceLevel_2_padding_0_MASK 0xff0000
#define DPM_TABLE_297__VceLevel_2_padding_0__SHIFT 0x10
#define DPM_TABLE_297__VceLevel_2_Divider_MASK 0xff000000
#define DPM_TABLE_297__VceLevel_2_Divider__SHIFT 0x18
#define DPM_TABLE_298__VceLevel_3_Frequency_MASK 0xffffffff
#define DPM_TABLE_298__VceLevel_3_Frequency__SHIFT 0x0
#define DPM_TABLE_299__VceLevel_3_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_299__VceLevel_3_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_299__VceLevel_3_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_299__VceLevel_3_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_300__VceLevel_3_padding_2_MASK 0xff
#define DPM_TABLE_300__VceLevel_3_padding_2__SHIFT 0x0
#define DPM_TABLE_300__VceLevel_3_padding_1_MASK 0xff00
#define DPM_TABLE_300__VceLevel_3_padding_1__SHIFT 0x8
#define DPM_TABLE_300__VceLevel_3_padding_0_MASK 0xff0000
#define DPM_TABLE_300__VceLevel_3_padding_0__SHIFT 0x10
#define DPM_TABLE_300__VceLevel_3_Divider_MASK 0xff000000
#define DPM_TABLE_300__VceLevel_3_Divider__SHIFT 0x18
#define DPM_TABLE_301__VceLevel_4_Frequency_MASK 0xffffffff
#define DPM_TABLE_301__VceLevel_4_Frequency__SHIFT 0x0
#define DPM_TABLE_302__VceLevel_4_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_302__VceLevel_4_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_302__VceLevel_4_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_302__VceLevel_4_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_303__VceLevel_4_padding_2_MASK 0xff
#define DPM_TABLE_303__VceLevel_4_padding_2__SHIFT 0x0
#define DPM_TABLE_303__VceLevel_4_padding_1_MASK 0xff00
#define DPM_TABLE_303__VceLevel_4_padding_1__SHIFT 0x8
#define DPM_TABLE_303__VceLevel_4_padding_0_MASK 0xff0000
#define DPM_TABLE_303__VceLevel_4_padding_0__SHIFT 0x10
#define DPM_TABLE_303__VceLevel_4_Divider_MASK 0xff000000
#define DPM_TABLE_303__VceLevel_4_Divider__SHIFT 0x18
#define DPM_TABLE_304__VceLevel_5_Frequency_MASK 0xffffffff
#define DPM_TABLE_304__VceLevel_5_Frequency__SHIFT 0x0
#define DPM_TABLE_305__VceLevel_5_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_305__VceLevel_5_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_305__VceLevel_5_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_305__VceLevel_5_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_306__VceLevel_5_padding_2_MASK 0xff
#define DPM_TABLE_306__VceLevel_5_padding_2__SHIFT 0x0
#define DPM_TABLE_306__VceLevel_5_padding_1_MASK 0xff00
#define DPM_TABLE_306__VceLevel_5_padding_1__SHIFT 0x8
#define DPM_TABLE_306__VceLevel_5_padding_0_MASK 0xff0000
#define DPM_TABLE_306__VceLevel_5_padding_0__SHIFT 0x10
#define DPM_TABLE_306__VceLevel_5_Divider_MASK 0xff000000
#define DPM_TABLE_306__VceLevel_5_Divider__SHIFT 0x18
#define DPM_TABLE_307__VceLevel_6_Frequency_MASK 0xffffffff
#define DPM_TABLE_307__VceLevel_6_Frequency__SHIFT 0x0
#define DPM_TABLE_308__VceLevel_6_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_308__VceLevel_6_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_308__VceLevel_6_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_308__VceLevel_6_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_309__VceLevel_6_padding_2_MASK 0xff
#define DPM_TABLE_309__VceLevel_6_padding_2__SHIFT 0x0
#define DPM_TABLE_309__VceLevel_6_padding_1_MASK 0xff00
#define DPM_TABLE_309__VceLevel_6_padding_1__SHIFT 0x8
#define DPM_TABLE_309__VceLevel_6_padding_0_MASK 0xff0000
#define DPM_TABLE_309__VceLevel_6_padding_0__SHIFT 0x10
#define DPM_TABLE_309__VceLevel_6_Divider_MASK 0xff000000
#define DPM_TABLE_309__VceLevel_6_Divider__SHIFT 0x18
#define DPM_TABLE_310__VceLevel_7_Frequency_MASK 0xffffffff
#define DPM_TABLE_310__VceLevel_7_Frequency__SHIFT 0x0
#define DPM_TABLE_311__VceLevel_7_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_311__VceLevel_7_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_311__VceLevel_7_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_311__VceLevel_7_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_312__VceLevel_7_padding_2_MASK 0xff
#define DPM_TABLE_312__VceLevel_7_padding_2__SHIFT 0x0
#define DPM_TABLE_312__VceLevel_7_padding_1_MASK 0xff00
#define DPM_TABLE_312__VceLevel_7_padding_1__SHIFT 0x8
#define DPM_TABLE_312__VceLevel_7_padding_0_MASK 0xff0000
#define DPM_TABLE_312__VceLevel_7_padding_0__SHIFT 0x10
#define DPM_TABLE_312__VceLevel_7_Divider_MASK 0xff000000
#define DPM_TABLE_312__VceLevel_7_Divider__SHIFT 0x18
#define DPM_TABLE_313__AcpLevel_0_Frequency_MASK 0xffffffff
#define DPM_TABLE_313__AcpLevel_0_Frequency__SHIFT 0x0
#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_314__AcpLevel_0_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_314__AcpLevel_0_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_315__AcpLevel_0_padding_2_MASK 0xff
#define DPM_TABLE_315__AcpLevel_0_padding_2__SHIFT 0x0
#define DPM_TABLE_315__AcpLevel_0_padding_1_MASK 0xff00
#define DPM_TABLE_315__AcpLevel_0_padding_1__SHIFT 0x8
#define DPM_TABLE_315__AcpLevel_0_padding_0_MASK 0xff0000
#define DPM_TABLE_315__AcpLevel_0_padding_0__SHIFT 0x10
#define DPM_TABLE_315__AcpLevel_0_Divider_MASK 0xff000000
#define DPM_TABLE_315__AcpLevel_0_Divider__SHIFT 0x18
#define DPM_TABLE_316__AcpLevel_1_Frequency_MASK 0xffffffff
#define DPM_TABLE_316__AcpLevel_1_Frequency__SHIFT 0x0
#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_317__AcpLevel_1_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_317__AcpLevel_1_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_318__AcpLevel_1_padding_2_MASK 0xff
#define DPM_TABLE_318__AcpLevel_1_padding_2__SHIFT 0x0
#define DPM_TABLE_318__AcpLevel_1_padding_1_MASK 0xff00
#define DPM_TABLE_318__AcpLevel_1_padding_1__SHIFT 0x8
#define DPM_TABLE_318__AcpLevel_1_padding_0_MASK 0xff0000
#define DPM_TABLE_318__AcpLevel_1_padding_0__SHIFT 0x10
#define DPM_TABLE_318__AcpLevel_1_Divider_MASK 0xff000000
#define DPM_TABLE_318__AcpLevel_1_Divider__SHIFT 0x18
#define DPM_TABLE_319__AcpLevel_2_Frequency_MASK 0xffffffff
#define DPM_TABLE_319__AcpLevel_2_Frequency__SHIFT 0x0
#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_320__AcpLevel_2_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_320__AcpLevel_2_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_321__AcpLevel_2_padding_2_MASK 0xff
#define DPM_TABLE_321__AcpLevel_2_padding_2__SHIFT 0x0
#define DPM_TABLE_321__AcpLevel_2_padding_1_MASK 0xff00
#define DPM_TABLE_321__AcpLevel_2_padding_1__SHIFT 0x8
#define DPM_TABLE_321__AcpLevel_2_padding_0_MASK 0xff0000
#define DPM_TABLE_321__AcpLevel_2_padding_0__SHIFT 0x10
#define DPM_TABLE_321__AcpLevel_2_Divider_MASK 0xff000000
#define DPM_TABLE_321__AcpLevel_2_Divider__SHIFT 0x18
#define DPM_TABLE_322__AcpLevel_3_Frequency_MASK 0xffffffff
#define DPM_TABLE_322__AcpLevel_3_Frequency__SHIFT 0x0
#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_323__AcpLevel_3_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_323__AcpLevel_3_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_324__AcpLevel_3_padding_2_MASK 0xff
#define DPM_TABLE_324__AcpLevel_3_padding_2__SHIFT 0x0
#define DPM_TABLE_324__AcpLevel_3_padding_1_MASK 0xff00
#define DPM_TABLE_324__AcpLevel_3_padding_1__SHIFT 0x8
#define DPM_TABLE_324__AcpLevel_3_padding_0_MASK 0xff0000
#define DPM_TABLE_324__AcpLevel_3_padding_0__SHIFT 0x10
#define DPM_TABLE_324__AcpLevel_3_Divider_MASK 0xff000000
#define DPM_TABLE_324__AcpLevel_3_Divider__SHIFT 0x18
#define DPM_TABLE_325__AcpLevel_4_Frequency_MASK 0xffffffff
#define DPM_TABLE_325__AcpLevel_4_Frequency__SHIFT 0x0
#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_326__AcpLevel_4_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_326__AcpLevel_4_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_327__AcpLevel_4_padding_2_MASK 0xff
#define DPM_TABLE_327__AcpLevel_4_padding_2__SHIFT 0x0
#define DPM_TABLE_327__AcpLevel_4_padding_1_MASK 0xff00
#define DPM_TABLE_327__AcpLevel_4_padding_1__SHIFT 0x8
#define DPM_TABLE_327__AcpLevel_4_padding_0_MASK 0xff0000
#define DPM_TABLE_327__AcpLevel_4_padding_0__SHIFT 0x10
#define DPM_TABLE_327__AcpLevel_4_Divider_MASK 0xff000000
#define DPM_TABLE_327__AcpLevel_4_Divider__SHIFT 0x18
#define DPM_TABLE_328__AcpLevel_5_Frequency_MASK 0xffffffff
#define DPM_TABLE_328__AcpLevel_5_Frequency__SHIFT 0x0
#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_329__AcpLevel_5_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_329__AcpLevel_5_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_330__AcpLevel_5_padding_2_MASK 0xff
#define DPM_TABLE_330__AcpLevel_5_padding_2__SHIFT 0x0
#define DPM_TABLE_330__AcpLevel_5_padding_1_MASK 0xff00
#define DPM_TABLE_330__AcpLevel_5_padding_1__SHIFT 0x8
#define DPM_TABLE_330__AcpLevel_5_padding_0_MASK 0xff0000
#define DPM_TABLE_330__AcpLevel_5_padding_0__SHIFT 0x10
#define DPM_TABLE_330__AcpLevel_5_Divider_MASK 0xff000000
#define DPM_TABLE_330__AcpLevel_5_Divider__SHIFT 0x18
#define DPM_TABLE_331__AcpLevel_6_Frequency_MASK 0xffffffff
#define DPM_TABLE_331__AcpLevel_6_Frequency__SHIFT 0x0
#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Phases_MASK 0xff
#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Phases__SHIFT 0x0
#define DPM_TABLE_332__AcpLevel_6_MinVoltage_VddGfx_MASK 0xff00
#define DPM_TABLE_332__AcpLevel_6_MinVoltage_VddGfx__SHIFT 0x8
#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddci_MASK 0xff0000
#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddci__SHIFT 0x10
#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddc_MASK 0xff000000
#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddc__SHIFT 0x18
#define DPM_TABLE_333__AcpLevel_6_padding_2_MASK 0xff
#define DPM_TABLE_333__AcpLevel_6_padding_2__SHIFT 0x0
#define DPM_TABLE_333__AcpLevel_6_padding_1_MASK 0xff00
#define DPM_TABLE_333__AcpLevel_6_padding_1__SHIFT 0x8
#define DPM_TABLE_333__AcpLevel_6_padding_0_MASK 0xff0000
#define DPM_TABLE_333__AcpLevel_6_padding_0__SHIFT 0x10
#define DPM_TABLE_333__AcpLevel_6_Divider_MASK 0xff000000
#define DPM_TABLE_333__AcpLevel_6_Divider__SHIFT 0x18
#define DPM_TABLE_334__AcpLevel_7_Frequency_MASK 0xffffffff
#define DPM_TABLE_334__AcpLevel_7_Frequency__SHIFT 0x0
#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Phases_MASK 0xff
2890#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Phases__SHIFT 0x0
2891#define DPM_TABLE_335__AcpLevel_7_MinVoltage_VddGfx_MASK 0xff00
2892#define DPM_TABLE_335__AcpLevel_7_MinVoltage_VddGfx__SHIFT 0x8
2893#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddci_MASK 0xff0000
2894#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddci__SHIFT 0x10
2895#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddc_MASK 0xff000000
2896#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddc__SHIFT 0x18
2897#define DPM_TABLE_336__AcpLevel_7_padding_2_MASK 0xff
2898#define DPM_TABLE_336__AcpLevel_7_padding_2__SHIFT 0x0
2899#define DPM_TABLE_336__AcpLevel_7_padding_1_MASK 0xff00
2900#define DPM_TABLE_336__AcpLevel_7_padding_1__SHIFT 0x8
2901#define DPM_TABLE_336__AcpLevel_7_padding_0_MASK 0xff0000
2902#define DPM_TABLE_336__AcpLevel_7_padding_0__SHIFT 0x10
2903#define DPM_TABLE_336__AcpLevel_7_Divider_MASK 0xff000000
2904#define DPM_TABLE_336__AcpLevel_7_Divider__SHIFT 0x18
2905#define DPM_TABLE_337__SamuLevel_0_Frequency_MASK 0xffffffff
2906#define DPM_TABLE_337__SamuLevel_0_Frequency__SHIFT 0x0
2907#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Phases_MASK 0xff
2908#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Phases__SHIFT 0x0
2909#define DPM_TABLE_338__SamuLevel_0_MinVoltage_VddGfx_MASK 0xff00
2910#define DPM_TABLE_338__SamuLevel_0_MinVoltage_VddGfx__SHIFT 0x8
2911#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddci_MASK 0xff0000
2912#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddci__SHIFT 0x10
2913#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddc_MASK 0xff000000
2914#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddc__SHIFT 0x18
2915#define DPM_TABLE_339__SamuLevel_0_padding_2_MASK 0xff
2916#define DPM_TABLE_339__SamuLevel_0_padding_2__SHIFT 0x0
2917#define DPM_TABLE_339__SamuLevel_0_padding_1_MASK 0xff00
2918#define DPM_TABLE_339__SamuLevel_0_padding_1__SHIFT 0x8
2919#define DPM_TABLE_339__SamuLevel_0_padding_0_MASK 0xff0000
2920#define DPM_TABLE_339__SamuLevel_0_padding_0__SHIFT 0x10
2921#define DPM_TABLE_339__SamuLevel_0_Divider_MASK 0xff000000
2922#define DPM_TABLE_339__SamuLevel_0_Divider__SHIFT 0x18
2923#define DPM_TABLE_340__SamuLevel_1_Frequency_MASK 0xffffffff
2924#define DPM_TABLE_340__SamuLevel_1_Frequency__SHIFT 0x0
2925#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Phases_MASK 0xff
2926#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Phases__SHIFT 0x0
2927#define DPM_TABLE_341__SamuLevel_1_MinVoltage_VddGfx_MASK 0xff00
2928#define DPM_TABLE_341__SamuLevel_1_MinVoltage_VddGfx__SHIFT 0x8
2929#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddci_MASK 0xff0000
2930#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddci__SHIFT 0x10
2931#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddc_MASK 0xff000000
2932#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddc__SHIFT 0x18
2933#define DPM_TABLE_342__SamuLevel_1_padding_2_MASK 0xff
2934#define DPM_TABLE_342__SamuLevel_1_padding_2__SHIFT 0x0
2935#define DPM_TABLE_342__SamuLevel_1_padding_1_MASK 0xff00
2936#define DPM_TABLE_342__SamuLevel_1_padding_1__SHIFT 0x8
2937#define DPM_TABLE_342__SamuLevel_1_padding_0_MASK 0xff0000
2938#define DPM_TABLE_342__SamuLevel_1_padding_0__SHIFT 0x10
2939#define DPM_TABLE_342__SamuLevel_1_Divider_MASK 0xff000000
2940#define DPM_TABLE_342__SamuLevel_1_Divider__SHIFT 0x18
2941#define DPM_TABLE_343__SamuLevel_2_Frequency_MASK 0xffffffff
2942#define DPM_TABLE_343__SamuLevel_2_Frequency__SHIFT 0x0
2943#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Phases_MASK 0xff
2944#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Phases__SHIFT 0x0
2945#define DPM_TABLE_344__SamuLevel_2_MinVoltage_VddGfx_MASK 0xff00
2946#define DPM_TABLE_344__SamuLevel_2_MinVoltage_VddGfx__SHIFT 0x8
2947#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddci_MASK 0xff0000
2948#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddci__SHIFT 0x10
2949#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddc_MASK 0xff000000
2950#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddc__SHIFT 0x18
2951#define DPM_TABLE_345__SamuLevel_2_padding_2_MASK 0xff
2952#define DPM_TABLE_345__SamuLevel_2_padding_2__SHIFT 0x0
2953#define DPM_TABLE_345__SamuLevel_2_padding_1_MASK 0xff00
2954#define DPM_TABLE_345__SamuLevel_2_padding_1__SHIFT 0x8
2955#define DPM_TABLE_345__SamuLevel_2_padding_0_MASK 0xff0000
2956#define DPM_TABLE_345__SamuLevel_2_padding_0__SHIFT 0x10
2957#define DPM_TABLE_345__SamuLevel_2_Divider_MASK 0xff000000
2958#define DPM_TABLE_345__SamuLevel_2_Divider__SHIFT 0x18
2959#define DPM_TABLE_346__SamuLevel_3_Frequency_MASK 0xffffffff
2960#define DPM_TABLE_346__SamuLevel_3_Frequency__SHIFT 0x0
2961#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Phases_MASK 0xff
2962#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Phases__SHIFT 0x0
2963#define DPM_TABLE_347__SamuLevel_3_MinVoltage_VddGfx_MASK 0xff00
2964#define DPM_TABLE_347__SamuLevel_3_MinVoltage_VddGfx__SHIFT 0x8
2965#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddci_MASK 0xff0000
2966#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddci__SHIFT 0x10
2967#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddc_MASK 0xff000000
2968#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddc__SHIFT 0x18
2969#define DPM_TABLE_348__SamuLevel_3_padding_2_MASK 0xff
2970#define DPM_TABLE_348__SamuLevel_3_padding_2__SHIFT 0x0
2971#define DPM_TABLE_348__SamuLevel_3_padding_1_MASK 0xff00
2972#define DPM_TABLE_348__SamuLevel_3_padding_1__SHIFT 0x8
2973#define DPM_TABLE_348__SamuLevel_3_padding_0_MASK 0xff0000
2974#define DPM_TABLE_348__SamuLevel_3_padding_0__SHIFT 0x10
2975#define DPM_TABLE_348__SamuLevel_3_Divider_MASK 0xff000000
2976#define DPM_TABLE_348__SamuLevel_3_Divider__SHIFT 0x18
2977#define DPM_TABLE_349__SamuLevel_4_Frequency_MASK 0xffffffff
2978#define DPM_TABLE_349__SamuLevel_4_Frequency__SHIFT 0x0
2979#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Phases_MASK 0xff
2980#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Phases__SHIFT 0x0
2981#define DPM_TABLE_350__SamuLevel_4_MinVoltage_VddGfx_MASK 0xff00
2982#define DPM_TABLE_350__SamuLevel_4_MinVoltage_VddGfx__SHIFT 0x8
2983#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddci_MASK 0xff0000
2984#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddci__SHIFT 0x10
2985#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddc_MASK 0xff000000
2986#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddc__SHIFT 0x18
2987#define DPM_TABLE_351__SamuLevel_4_padding_2_MASK 0xff
2988#define DPM_TABLE_351__SamuLevel_4_padding_2__SHIFT 0x0
2989#define DPM_TABLE_351__SamuLevel_4_padding_1_MASK 0xff00
2990#define DPM_TABLE_351__SamuLevel_4_padding_1__SHIFT 0x8
2991#define DPM_TABLE_351__SamuLevel_4_padding_0_MASK 0xff0000
2992#define DPM_TABLE_351__SamuLevel_4_padding_0__SHIFT 0x10
2993#define DPM_TABLE_351__SamuLevel_4_Divider_MASK 0xff000000
2994#define DPM_TABLE_351__SamuLevel_4_Divider__SHIFT 0x18
2995#define DPM_TABLE_352__SamuLevel_5_Frequency_MASK 0xffffffff
2996#define DPM_TABLE_352__SamuLevel_5_Frequency__SHIFT 0x0
2997#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Phases_MASK 0xff
2998#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Phases__SHIFT 0x0
2999#define DPM_TABLE_353__SamuLevel_5_MinVoltage_VddGfx_MASK 0xff00
3000#define DPM_TABLE_353__SamuLevel_5_MinVoltage_VddGfx__SHIFT 0x8
3001#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddci_MASK 0xff0000
3002#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddci__SHIFT 0x10
3003#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddc_MASK 0xff000000
3004#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddc__SHIFT 0x18
3005#define DPM_TABLE_354__SamuLevel_5_padding_2_MASK 0xff
3006#define DPM_TABLE_354__SamuLevel_5_padding_2__SHIFT 0x0
3007#define DPM_TABLE_354__SamuLevel_5_padding_1_MASK 0xff00
3008#define DPM_TABLE_354__SamuLevel_5_padding_1__SHIFT 0x8
3009#define DPM_TABLE_354__SamuLevel_5_padding_0_MASK 0xff0000
3010#define DPM_TABLE_354__SamuLevel_5_padding_0__SHIFT 0x10
3011#define DPM_TABLE_354__SamuLevel_5_Divider_MASK 0xff000000
3012#define DPM_TABLE_354__SamuLevel_5_Divider__SHIFT 0x18
3013#define DPM_TABLE_355__SamuLevel_6_Frequency_MASK 0xffffffff
3014#define DPM_TABLE_355__SamuLevel_6_Frequency__SHIFT 0x0
3015#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Phases_MASK 0xff
3016#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Phases__SHIFT 0x0
3017#define DPM_TABLE_356__SamuLevel_6_MinVoltage_VddGfx_MASK 0xff00
3018#define DPM_TABLE_356__SamuLevel_6_MinVoltage_VddGfx__SHIFT 0x8
3019#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddci_MASK 0xff0000
3020#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddci__SHIFT 0x10
3021#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddc_MASK 0xff000000
3022#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddc__SHIFT 0x18
3023#define DPM_TABLE_357__SamuLevel_6_padding_2_MASK 0xff
3024#define DPM_TABLE_357__SamuLevel_6_padding_2__SHIFT 0x0
3025#define DPM_TABLE_357__SamuLevel_6_padding_1_MASK 0xff00
3026#define DPM_TABLE_357__SamuLevel_6_padding_1__SHIFT 0x8
3027#define DPM_TABLE_357__SamuLevel_6_padding_0_MASK 0xff0000
3028#define DPM_TABLE_357__SamuLevel_6_padding_0__SHIFT 0x10
3029#define DPM_TABLE_357__SamuLevel_6_Divider_MASK 0xff000000
3030#define DPM_TABLE_357__SamuLevel_6_Divider__SHIFT 0x18
3031#define DPM_TABLE_358__SamuLevel_7_Frequency_MASK 0xffffffff
3032#define DPM_TABLE_358__SamuLevel_7_Frequency__SHIFT 0x0
3033#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Phases_MASK 0xff
3034#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Phases__SHIFT 0x0
3035#define DPM_TABLE_359__SamuLevel_7_MinVoltage_VddGfx_MASK 0xff00
3036#define DPM_TABLE_359__SamuLevel_7_MinVoltage_VddGfx__SHIFT 0x8
3037#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddci_MASK 0xff0000
3038#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddci__SHIFT 0x10
3039#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddc_MASK 0xff000000
3040#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddc__SHIFT 0x18
3041#define DPM_TABLE_360__SamuLevel_7_padding_2_MASK 0xff
3042#define DPM_TABLE_360__SamuLevel_7_padding_2__SHIFT 0x0
3043#define DPM_TABLE_360__SamuLevel_7_padding_1_MASK 0xff00
3044#define DPM_TABLE_360__SamuLevel_7_padding_1__SHIFT 0x8
3045#define DPM_TABLE_360__SamuLevel_7_padding_0_MASK 0xff0000
3046#define DPM_TABLE_360__SamuLevel_7_padding_0__SHIFT 0x10
3047#define DPM_TABLE_360__SamuLevel_7_Divider_MASK 0xff000000
3048#define DPM_TABLE_360__SamuLevel_7_Divider__SHIFT 0x18
3049#define DPM_TABLE_361__Ulv_CcPwrDynRm_MASK 0xffffffff
3050#define DPM_TABLE_361__Ulv_CcPwrDynRm__SHIFT 0x0
3051#define DPM_TABLE_362__Ulv_CcPwrDynRm1_MASK 0xffffffff
3052#define DPM_TABLE_362__Ulv_CcPwrDynRm1__SHIFT 0x0
3053#define DPM_TABLE_363__Ulv_VddcPhase_MASK 0xff
3054#define DPM_TABLE_363__Ulv_VddcPhase__SHIFT 0x0
3055#define DPM_TABLE_363__Ulv_VddcOffsetVid_MASK 0xff00
3056#define DPM_TABLE_363__Ulv_VddcOffsetVid__SHIFT 0x8
3057#define DPM_TABLE_363__Ulv_VddcOffset_MASK 0xffff0000
3058#define DPM_TABLE_363__Ulv_VddcOffset__SHIFT 0x10
3059#define DPM_TABLE_364__Ulv_Reserved_MASK 0xffffffff
3060#define DPM_TABLE_364__Ulv_Reserved__SHIFT 0x0
3061#define DPM_TABLE_365__SclkStepSize_MASK 0xffffffff
3062#define DPM_TABLE_365__SclkStepSize__SHIFT 0x0
3063#define DPM_TABLE_366__Smio_0_MASK 0xffffffff
3064#define DPM_TABLE_366__Smio_0__SHIFT 0x0
3065#define DPM_TABLE_367__Smio_1_MASK 0xffffffff
3066#define DPM_TABLE_367__Smio_1__SHIFT 0x0
3067#define DPM_TABLE_368__Smio_2_MASK 0xffffffff
3068#define DPM_TABLE_368__Smio_2__SHIFT 0x0
3069#define DPM_TABLE_369__Smio_3_MASK 0xffffffff
3070#define DPM_TABLE_369__Smio_3__SHIFT 0x0
3071#define DPM_TABLE_370__Smio_4_MASK 0xffffffff
3072#define DPM_TABLE_370__Smio_4__SHIFT 0x0
3073#define DPM_TABLE_371__Smio_5_MASK 0xffffffff
3074#define DPM_TABLE_371__Smio_5__SHIFT 0x0
3075#define DPM_TABLE_372__Smio_6_MASK 0xffffffff
3076#define DPM_TABLE_372__Smio_6__SHIFT 0x0
3077#define DPM_TABLE_373__Smio_7_MASK 0xffffffff
3078#define DPM_TABLE_373__Smio_7__SHIFT 0x0
3079#define DPM_TABLE_374__Smio_8_MASK 0xffffffff
3080#define DPM_TABLE_374__Smio_8__SHIFT 0x0
3081#define DPM_TABLE_375__Smio_9_MASK 0xffffffff
3082#define DPM_TABLE_375__Smio_9__SHIFT 0x0
3083#define DPM_TABLE_376__Smio_10_MASK 0xffffffff
3084#define DPM_TABLE_376__Smio_10__SHIFT 0x0
3085#define DPM_TABLE_377__Smio_11_MASK 0xffffffff
3086#define DPM_TABLE_377__Smio_11__SHIFT 0x0
3087#define DPM_TABLE_378__Smio_12_MASK 0xffffffff
3088#define DPM_TABLE_378__Smio_12__SHIFT 0x0
3089#define DPM_TABLE_379__Smio_13_MASK 0xffffffff
3090#define DPM_TABLE_379__Smio_13__SHIFT 0x0
3091#define DPM_TABLE_380__Smio_14_MASK 0xffffffff
3092#define DPM_TABLE_380__Smio_14__SHIFT 0x0
3093#define DPM_TABLE_381__Smio_15_MASK 0xffffffff
3094#define DPM_TABLE_381__Smio_15__SHIFT 0x0
3095#define DPM_TABLE_382__Smio_16_MASK 0xffffffff
3096#define DPM_TABLE_382__Smio_16__SHIFT 0x0
3097#define DPM_TABLE_383__Smio_17_MASK 0xffffffff
3098#define DPM_TABLE_383__Smio_17__SHIFT 0x0
3099#define DPM_TABLE_384__Smio_18_MASK 0xffffffff
3100#define DPM_TABLE_384__Smio_18__SHIFT 0x0
3101#define DPM_TABLE_385__Smio_19_MASK 0xffffffff
3102#define DPM_TABLE_385__Smio_19__SHIFT 0x0
3103#define DPM_TABLE_386__Smio_20_MASK 0xffffffff
3104#define DPM_TABLE_386__Smio_20__SHIFT 0x0
3105#define DPM_TABLE_387__Smio_21_MASK 0xffffffff
3106#define DPM_TABLE_387__Smio_21__SHIFT 0x0
3107#define DPM_TABLE_388__Smio_22_MASK 0xffffffff
3108#define DPM_TABLE_388__Smio_22__SHIFT 0x0
3109#define DPM_TABLE_389__Smio_23_MASK 0xffffffff
3110#define DPM_TABLE_389__Smio_23__SHIFT 0x0
3111#define DPM_TABLE_390__Smio_24_MASK 0xffffffff
3112#define DPM_TABLE_390__Smio_24__SHIFT 0x0
3113#define DPM_TABLE_391__Smio_25_MASK 0xffffffff
3114#define DPM_TABLE_391__Smio_25__SHIFT 0x0
3115#define DPM_TABLE_392__Smio_26_MASK 0xffffffff
3116#define DPM_TABLE_392__Smio_26__SHIFT 0x0
3117#define DPM_TABLE_393__Smio_27_MASK 0xffffffff
3118#define DPM_TABLE_393__Smio_27__SHIFT 0x0
3119#define DPM_TABLE_394__Smio_28_MASK 0xffffffff
3120#define DPM_TABLE_394__Smio_28__SHIFT 0x0
3121#define DPM_TABLE_395__Smio_29_MASK 0xffffffff
3122#define DPM_TABLE_395__Smio_29__SHIFT 0x0
3123#define DPM_TABLE_396__Smio_30_MASK 0xffffffff
3124#define DPM_TABLE_396__Smio_30__SHIFT 0x0
3125#define DPM_TABLE_397__Smio_31_MASK 0xffffffff
3126#define DPM_TABLE_397__Smio_31__SHIFT 0x0
3127#define DPM_TABLE_398__SamuBootLevel_MASK 0xff
3128#define DPM_TABLE_398__SamuBootLevel__SHIFT 0x0
3129#define DPM_TABLE_398__AcpBootLevel_MASK 0xff00
3130#define DPM_TABLE_398__AcpBootLevel__SHIFT 0x8
3131#define DPM_TABLE_398__VceBootLevel_MASK 0xff0000
3132#define DPM_TABLE_398__VceBootLevel__SHIFT 0x10
3133#define DPM_TABLE_398__UvdBootLevel_MASK 0xff000000
3134#define DPM_TABLE_398__UvdBootLevel__SHIFT 0x18
3135#define DPM_TABLE_399__GraphicsInterval_MASK 0xff
3136#define DPM_TABLE_399__GraphicsInterval__SHIFT 0x0
3137#define DPM_TABLE_399__GraphicsThermThrottleEnable_MASK 0xff00
3138#define DPM_TABLE_399__GraphicsThermThrottleEnable__SHIFT 0x8
3139#define DPM_TABLE_399__GraphicsVoltageChangeEnable_MASK 0xff0000
3140#define DPM_TABLE_399__GraphicsVoltageChangeEnable__SHIFT 0x10
3141#define DPM_TABLE_399__GraphicsBootLevel_MASK 0xff000000
3142#define DPM_TABLE_399__GraphicsBootLevel__SHIFT 0x18
3143#define DPM_TABLE_400__TemperatureLimitHigh_MASK 0xffff
3144#define DPM_TABLE_400__TemperatureLimitHigh__SHIFT 0x0
3145#define DPM_TABLE_400__ThermalInterval_MASK 0xff0000
3146#define DPM_TABLE_400__ThermalInterval__SHIFT 0x10
3147#define DPM_TABLE_400__VoltageInterval_MASK 0xff000000
3148#define DPM_TABLE_400__VoltageInterval__SHIFT 0x18
3149#define DPM_TABLE_401__MemoryVoltageChangeEnable_MASK 0xff
3150#define DPM_TABLE_401__MemoryVoltageChangeEnable__SHIFT 0x0
3151#define DPM_TABLE_401__MemoryBootLevel_MASK 0xff00
3152#define DPM_TABLE_401__MemoryBootLevel__SHIFT 0x8
3153#define DPM_TABLE_401__TemperatureLimitLow_MASK 0xffff0000
3154#define DPM_TABLE_401__TemperatureLimitLow__SHIFT 0x10
3155#define DPM_TABLE_402__MemoryThermThrottleEnable_MASK 0xff
3156#define DPM_TABLE_402__MemoryThermThrottleEnable__SHIFT 0x0
3157#define DPM_TABLE_402__MemoryInterval_MASK 0xff00
3158#define DPM_TABLE_402__MemoryInterval__SHIFT 0x8
3159#define DPM_TABLE_402__BootMVdd_MASK 0xffff0000
3160#define DPM_TABLE_402__BootMVdd__SHIFT 0x10
3161#define DPM_TABLE_403__PhaseResponseTime_MASK 0xffff
3162#define DPM_TABLE_403__PhaseResponseTime__SHIFT 0x0
3163#define DPM_TABLE_403__VoltageResponseTime_MASK 0xffff0000
3164#define DPM_TABLE_403__VoltageResponseTime__SHIFT 0x10
3165#define DPM_TABLE_404__DTEMode_MASK 0xff
3166#define DPM_TABLE_404__DTEMode__SHIFT 0x0
3167#define DPM_TABLE_404__DTEInterval_MASK 0xff00
3168#define DPM_TABLE_404__DTEInterval__SHIFT 0x8
3169#define DPM_TABLE_404__PCIeGenInterval_MASK 0xff0000
3170#define DPM_TABLE_404__PCIeGenInterval__SHIFT 0x10
3171#define DPM_TABLE_404__PCIeBootLinkLevel_MASK 0xff000000
3172#define DPM_TABLE_404__PCIeBootLinkLevel__SHIFT 0x18
3173#define DPM_TABLE_405__ThermGpio_MASK 0xff
3174#define DPM_TABLE_405__ThermGpio__SHIFT 0x0
3175#define DPM_TABLE_405__AcDcGpio_MASK 0xff00
3176#define DPM_TABLE_405__AcDcGpio__SHIFT 0x8
3177#define DPM_TABLE_405__VRHotGpio_MASK 0xff0000
3178#define DPM_TABLE_405__VRHotGpio__SHIFT 0x10
3179#define DPM_TABLE_405__SVI2Enable_MASK 0xff000000
3180#define DPM_TABLE_405__SVI2Enable__SHIFT 0x18
3181#define DPM_TABLE_406__PPM_TemperatureLimit_MASK 0xffff
3182#define DPM_TABLE_406__PPM_TemperatureLimit__SHIFT 0x0
3183#define DPM_TABLE_406__PPM_PkgPwrLimit_MASK 0xffff0000
3184#define DPM_TABLE_406__PPM_PkgPwrLimit__SHIFT 0x10
3185#define DPM_TABLE_407__TargetTdp_MASK 0xffff
3186#define DPM_TABLE_407__TargetTdp__SHIFT 0x0
3187#define DPM_TABLE_407__DefaultTdp_MASK 0xffff0000
3188#define DPM_TABLE_407__DefaultTdp__SHIFT 0x10
3189#define DPM_TABLE_408__FpsLowThreshold_MASK 0xffff
3190#define DPM_TABLE_408__FpsLowThreshold__SHIFT 0x0
3191#define DPM_TABLE_408__FpsHighThreshold_MASK 0xffff0000
3192#define DPM_TABLE_408__FpsHighThreshold__SHIFT 0x10
3193#define DPM_TABLE_409__BAPMTI_R_0_1_0_MASK 0xffff
3194#define DPM_TABLE_409__BAPMTI_R_0_1_0__SHIFT 0x0
3195#define DPM_TABLE_409__BAPMTI_R_0_0_0_MASK 0xffff0000
3196#define DPM_TABLE_409__BAPMTI_R_0_0_0__SHIFT 0x10
3197#define DPM_TABLE_410__BAPMTI_R_1_0_0_MASK 0xffff
3198#define DPM_TABLE_410__BAPMTI_R_1_0_0__SHIFT 0x0
3199#define DPM_TABLE_410__BAPMTI_R_0_2_0_MASK 0xffff0000
3200#define DPM_TABLE_410__BAPMTI_R_0_2_0__SHIFT 0x10
3201#define DPM_TABLE_411__BAPMTI_R_1_2_0_MASK 0xffff
3202#define DPM_TABLE_411__BAPMTI_R_1_2_0__SHIFT 0x0
3203#define DPM_TABLE_411__BAPMTI_R_1_1_0_MASK 0xffff0000
3204#define DPM_TABLE_411__BAPMTI_R_1_1_0__SHIFT 0x10
3205#define DPM_TABLE_412__BAPMTI_R_2_1_0_MASK 0xffff
3206#define DPM_TABLE_412__BAPMTI_R_2_1_0__SHIFT 0x0
3207#define DPM_TABLE_412__BAPMTI_R_2_0_0_MASK 0xffff0000
3208#define DPM_TABLE_412__BAPMTI_R_2_0_0__SHIFT 0x10
3209#define DPM_TABLE_413__BAPMTI_R_3_0_0_MASK 0xffff
3210#define DPM_TABLE_413__BAPMTI_R_3_0_0__SHIFT 0x0
3211#define DPM_TABLE_413__BAPMTI_R_2_2_0_MASK 0xffff0000
3212#define DPM_TABLE_413__BAPMTI_R_2_2_0__SHIFT 0x10
3213#define DPM_TABLE_414__BAPMTI_R_3_2_0_MASK 0xffff
3214#define DPM_TABLE_414__BAPMTI_R_3_2_0__SHIFT 0x0
3215#define DPM_TABLE_414__BAPMTI_R_3_1_0_MASK 0xffff0000
3216#define DPM_TABLE_414__BAPMTI_R_3_1_0__SHIFT 0x10
3217#define DPM_TABLE_415__BAPMTI_R_4_1_0_MASK 0xffff
3218#define DPM_TABLE_415__BAPMTI_R_4_1_0__SHIFT 0x0
3219#define DPM_TABLE_415__BAPMTI_R_4_0_0_MASK 0xffff0000
3220#define DPM_TABLE_415__BAPMTI_R_4_0_0__SHIFT 0x10
3221#define DPM_TABLE_416__BAPMTI_RC_0_0_0_MASK 0xffff
3222#define DPM_TABLE_416__BAPMTI_RC_0_0_0__SHIFT 0x0
3223#define DPM_TABLE_416__BAPMTI_R_4_2_0_MASK 0xffff0000
3224#define DPM_TABLE_416__BAPMTI_R_4_2_0__SHIFT 0x10
3225#define DPM_TABLE_417__BAPMTI_RC_0_2_0_MASK 0xffff
3226#define DPM_TABLE_417__BAPMTI_RC_0_2_0__SHIFT 0x0
3227#define DPM_TABLE_417__BAPMTI_RC_0_1_0_MASK 0xffff0000
3228#define DPM_TABLE_417__BAPMTI_RC_0_1_0__SHIFT 0x10
3229#define DPM_TABLE_418__BAPMTI_RC_1_1_0_MASK 0xffff
3230#define DPM_TABLE_418__BAPMTI_RC_1_1_0__SHIFT 0x0
3231#define DPM_TABLE_418__BAPMTI_RC_1_0_0_MASK 0xffff0000
3232#define DPM_TABLE_418__BAPMTI_RC_1_0_0__SHIFT 0x10
3233#define DPM_TABLE_419__BAPMTI_RC_2_0_0_MASK 0xffff
3234#define DPM_TABLE_419__BAPMTI_RC_2_0_0__SHIFT 0x0
3235#define DPM_TABLE_419__BAPMTI_RC_1_2_0_MASK 0xffff0000
3236#define DPM_TABLE_419__BAPMTI_RC_1_2_0__SHIFT 0x10
3237#define DPM_TABLE_420__BAPMTI_RC_2_2_0_MASK 0xffff
3238#define DPM_TABLE_420__BAPMTI_RC_2_2_0__SHIFT 0x0
3239#define DPM_TABLE_420__BAPMTI_RC_2_1_0_MASK 0xffff0000
3240#define DPM_TABLE_420__BAPMTI_RC_2_1_0__SHIFT 0x10
3241#define DPM_TABLE_421__BAPMTI_RC_3_1_0_MASK 0xffff
3242#define DPM_TABLE_421__BAPMTI_RC_3_1_0__SHIFT 0x0
3243#define DPM_TABLE_421__BAPMTI_RC_3_0_0_MASK 0xffff0000
3244#define DPM_TABLE_421__BAPMTI_RC_3_0_0__SHIFT 0x10
3245#define DPM_TABLE_422__BAPMTI_RC_4_0_0_MASK 0xffff
3246#define DPM_TABLE_422__BAPMTI_RC_4_0_0__SHIFT 0x0
3247#define DPM_TABLE_422__BAPMTI_RC_3_2_0_MASK 0xffff0000
3248#define DPM_TABLE_422__BAPMTI_RC_3_2_0__SHIFT 0x10
3249#define DPM_TABLE_423__BAPMTI_RC_4_2_0_MASK 0xffff
3250#define DPM_TABLE_423__BAPMTI_RC_4_2_0__SHIFT 0x0
3251#define DPM_TABLE_423__BAPMTI_RC_4_1_0_MASK 0xffff0000
3252#define DPM_TABLE_423__BAPMTI_RC_4_1_0__SHIFT 0x10
3253#define DPM_TABLE_424__GpuTjHyst_MASK 0xff
3254#define DPM_TABLE_424__GpuTjHyst__SHIFT 0x0
3255#define DPM_TABLE_424__GpuTjMax_MASK 0xff00
3256#define DPM_TABLE_424__GpuTjMax__SHIFT 0x8
3257#define DPM_TABLE_424__DTETjOffset_MASK 0xff0000
3258#define DPM_TABLE_424__DTETjOffset__SHIFT 0x10
3259#define DPM_TABLE_424__DTEAmbientTempBase_MASK 0xff000000
3260#define DPM_TABLE_424__DTEAmbientTempBase__SHIFT 0x18
3261#define DPM_TABLE_425__BootVoltage_Phases_MASK 0xff
3262#define DPM_TABLE_425__BootVoltage_Phases__SHIFT 0x0
3263#define DPM_TABLE_425__BootVoltage_VddGfx_MASK 0xff00
3264#define DPM_TABLE_425__BootVoltage_VddGfx__SHIFT 0x8
3265#define DPM_TABLE_425__BootVoltage_Vddci_MASK 0xff0000
3266#define DPM_TABLE_425__BootVoltage_Vddci__SHIFT 0x10
3267#define DPM_TABLE_425__BootVoltage_Vddc_MASK 0xff000000
3268#define DPM_TABLE_425__BootVoltage_Vddc__SHIFT 0x18
3269#define DPM_TABLE_426__BAPM_TEMP_GRADIENT_MASK 0xffffffff
3270#define DPM_TABLE_426__BAPM_TEMP_GRADIENT__SHIFT 0x0
3271#define DPM_TABLE_427__LowSclkInterruptThreshold_MASK 0xffffffff
3272#define DPM_TABLE_427__LowSclkInterruptThreshold__SHIFT 0x0
3273#define DPM_TABLE_428__VddGfxReChkWait_MASK 0xffffffff
3274#define DPM_TABLE_428__VddGfxReChkWait__SHIFT 0x0
3275#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_1_MASK 0xff
3276#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_1__SHIFT 0x0
3277#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_0_MASK 0xff00
3278#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_0__SHIFT 0x8
3279#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_maxVID_MASK 0xff0000
3280#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_maxVID__SHIFT 0x10
3281#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_minVID_MASK 0xff000000
3282#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_minVID__SHIFT 0x18
3283#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_3_MASK 0xff
3284#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_3__SHIFT 0x0
3285#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_2_MASK 0xff00
3286#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_2__SHIFT 0x8
3287#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_1_MASK 0xff0000
3288#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_1__SHIFT 0x10
3289#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_0_MASK 0xff000000
3290#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_0__SHIFT 0x18
3291#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_7_MASK 0xff
3292#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_7__SHIFT 0x0
3293#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_6_MASK 0xff00
3294#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_6__SHIFT 0x8
3295#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_5_MASK 0xff0000
3296#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_5__SHIFT 0x10
3297#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_4_MASK 0xff000000
3298#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_4__SHIFT 0x18
3299#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_1_MASK 0xff
3300#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_1__SHIFT 0x0
3301#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_0_MASK 0xff00
3302#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_0__SHIFT 0x8
3303#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_maxVID_MASK 0xff0000
3304#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_maxVID__SHIFT 0x10
3305#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_minVID_MASK 0xff000000
3306#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_minVID__SHIFT 0x18
3307#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_3_MASK 0xff
3308#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_3__SHIFT 0x0
3309#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_2_MASK 0xff00
3310#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_2__SHIFT 0x8
3311#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_1_MASK 0xff0000
3312#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_1__SHIFT 0x10
3313#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_0_MASK 0xff000000
3314#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_0__SHIFT 0x18
3315#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_7_MASK 0xff
3316#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_7__SHIFT 0x0
3317#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_6_MASK 0xff00
3318#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_6__SHIFT 0x8
3319#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_5_MASK 0xff0000
3320#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_5__SHIFT 0x10
3321#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_4_MASK 0xff000000
3322#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_4__SHIFT 0x18
3323#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_1_MASK 0xff
3324#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_1__SHIFT 0x0
3325#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_0_MASK 0xff00
3326#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_0__SHIFT 0x8
3327#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_maxVID_MASK 0xff0000
3328#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_maxVID__SHIFT 0x10
3329#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_minVID_MASK 0xff000000
3330#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_minVID__SHIFT 0x18
3331#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_3_MASK 0xff
3332#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_3__SHIFT 0x0
3333#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_2_MASK 0xff00
3334#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_2__SHIFT 0x8
3335#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_1_MASK 0xff0000
3336#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_1__SHIFT 0x10
3337#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_0_MASK 0xff000000
3338#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_0__SHIFT 0x18
3339#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_7_MASK 0xff
3340#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_7__SHIFT 0x0
3341#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_6_MASK 0xff00
3342#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_6__SHIFT 0x8
3343#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_5_MASK 0xff0000
3344#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_5__SHIFT 0x10
3345#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_4_MASK 0xff000000
3346#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_4__SHIFT 0x18
3347#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_1_MASK 0xff
3348#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_1__SHIFT 0x0
3349#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_0_MASK 0xff00
3350#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_0__SHIFT 0x8
3351#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_maxVID_MASK 0xff0000
3352#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_maxVID__SHIFT 0x10
3353#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_minVID_MASK 0xff000000
3354#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_minVID__SHIFT 0x18
3355#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_3_MASK 0xff
3356#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_3__SHIFT 0x0
3357#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_2_MASK 0xff00
3358#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_2__SHIFT 0x8
3359#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_1_MASK 0xff0000
3360#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_1__SHIFT 0x10
3361#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_0_MASK 0xff000000
3362#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_0__SHIFT 0x18
3363#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_7_MASK 0xff
3364#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_7__SHIFT 0x0
3365#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_6_MASK 0xff00
3366#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_6__SHIFT 0x8
3367#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_5_MASK 0xff0000
3368#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_5__SHIFT 0x10
3369#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_4_MASK 0xff000000
3370#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_4__SHIFT 0x18
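/*
 * A minimal sketch of how the paired _MASK/__SHIFT macros above are
 * typically consumed: mask the packed 32-bit table entry, then
 * right-align the field. The helper names below (dpm_get_field,
 * dpm_set_field, acp_level0_divider) are illustrative assumptions,
 * not symbols defined by this header.
 */
#include <stdint.h>

/* Read one field out of a packed 32-bit table entry. */
static inline uint32_t dpm_get_field(uint32_t reg, uint32_t mask,
				     uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Replace one field, leaving the entry's other bits untouched. */
static inline uint32_t dpm_set_field(uint32_t reg, uint32_t mask,
				     uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: the AcpLevel_0 divider occupies bits 31:24 of DPM_TABLE_315. */
static inline uint32_t acp_level0_divider(uint32_t dpm_table_315)
{
	return dpm_get_field(dpm_table_315,
			     DPM_TABLE_315__AcpLevel_0_Divider_MASK,
			     DPM_TABLE_315__AcpLevel_0_Divider__SHIFT);
}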
#define SOFT_REGISTERS_TABLE_1__RefClockFrequency_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_1__RefClockFrequency__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_2__PmTimerPeriod_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_2__PmTimerPeriod__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_3__FeatureEnables_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_3__FeatureEnables__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_4__PreVBlankGap_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_4__PreVBlankGap__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_5__VBlankTimeout_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_5__VBlankTimeout__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_6__TrainTimeGap_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_6__TrainTimeGap__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_7__MvddSwitchTime_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_7__MvddSwitchTime__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_8__LongestAcpiTrainTime_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_8__LongestAcpiTrainTime__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_9__AcpiDelay_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_9__AcpiDelay__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_10__G5TrainTime_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_10__G5TrainTime__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_11__DelayMpllPwron_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_11__DelayMpllPwron__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_12__VoltageChangeTimeout_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_12__VoltageChangeTimeout__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_13__HandshakeDisables_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_13__HandshakeDisables__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_14__DisplayPhy4Config_MASK 0xff
#define SOFT_REGISTERS_TABLE_14__DisplayPhy4Config__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_14__DisplayPhy3Config_MASK 0xff00
#define SOFT_REGISTERS_TABLE_14__DisplayPhy3Config__SHIFT 0x8
#define SOFT_REGISTERS_TABLE_14__DisplayPhy2Config_MASK 0xff0000
#define SOFT_REGISTERS_TABLE_14__DisplayPhy2Config__SHIFT 0x10
#define SOFT_REGISTERS_TABLE_14__DisplayPhy1Config_MASK 0xff000000
#define SOFT_REGISTERS_TABLE_14__DisplayPhy1Config__SHIFT 0x18
#define SOFT_REGISTERS_TABLE_15__DisplayPhy8Config_MASK 0xff
#define SOFT_REGISTERS_TABLE_15__DisplayPhy8Config__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_15__DisplayPhy7Config_MASK 0xff00
#define SOFT_REGISTERS_TABLE_15__DisplayPhy7Config__SHIFT 0x8
#define SOFT_REGISTERS_TABLE_15__DisplayPhy6Config_MASK 0xff0000
#define SOFT_REGISTERS_TABLE_15__DisplayPhy6Config__SHIFT 0x10
#define SOFT_REGISTERS_TABLE_15__DisplayPhy5Config_MASK 0xff000000
#define SOFT_REGISTERS_TABLE_15__DisplayPhy5Config__SHIFT 0x18
#define SOFT_REGISTERS_TABLE_16__AverageGraphicsActivity_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_16__AverageGraphicsActivity__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_17__AverageMemoryActivity_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_17__AverageMemoryActivity__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_18__AverageGioActivity_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_18__AverageGioActivity__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_19__PCIeDpmEnabledLevels_MASK 0xff
#define SOFT_REGISTERS_TABLE_19__PCIeDpmEnabledLevels__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_19__LClkDpmEnabledLevels_MASK 0xff00
#define SOFT_REGISTERS_TABLE_19__LClkDpmEnabledLevels__SHIFT 0x8
#define SOFT_REGISTERS_TABLE_19__MClkDpmEnabledLevels_MASK 0xff0000
#define SOFT_REGISTERS_TABLE_19__MClkDpmEnabledLevels__SHIFT 0x10
#define SOFT_REGISTERS_TABLE_19__SClkDpmEnabledLevels_MASK 0xff000000
#define SOFT_REGISTERS_TABLE_19__SClkDpmEnabledLevels__SHIFT 0x18
#define SOFT_REGISTERS_TABLE_20__VCEDpmEnabledLevels_MASK 0xff
#define SOFT_REGISTERS_TABLE_20__VCEDpmEnabledLevels__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_20__ACPDpmEnabledLevels_MASK 0xff00
#define SOFT_REGISTERS_TABLE_20__ACPDpmEnabledLevels__SHIFT 0x8
#define SOFT_REGISTERS_TABLE_20__SAMUDpmEnabledLevels_MASK 0xff0000
#define SOFT_REGISTERS_TABLE_20__SAMUDpmEnabledLevels__SHIFT 0x10
#define SOFT_REGISTERS_TABLE_20__UVDDpmEnabledLevels_MASK 0xff000000
#define SOFT_REGISTERS_TABLE_20__UVDDpmEnabledLevels__SHIFT 0x18
#define SOFT_REGISTERS_TABLE_21__DRAM_LOG_ADDR_H_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_21__DRAM_LOG_ADDR_H__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_22__DRAM_LOG_ADDR_L_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_22__DRAM_LOG_ADDR_L__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_23__DRAM_LOG_PHY_ADDR_H_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_23__DRAM_LOG_PHY_ADDR_H__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_24__DRAM_LOG_PHY_ADDR_L_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_24__DRAM_LOG_PHY_ADDR_L__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_25__DRAM_LOG_BUFF_SIZE_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_25__DRAM_LOG_BUFF_SIZE__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_26__UlvEnterCount_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_26__UlvEnterCount__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_27__UlvTime_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_27__UlvTime__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_28__UcodeLoadStatus_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_28__UcodeLoadStatus__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_29__Reserved_0_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_29__Reserved_0__SHIFT 0x0
#define SOFT_REGISTERS_TABLE_30__Reserved_1_MASK 0xffffffff
#define SOFT_REGISTERS_TABLE_30__Reserved_1__SHIFT 0x0
#define PM_FUSES_1__SviLoadLineOffsetVddC_MASK 0xff
#define PM_FUSES_1__SviLoadLineOffsetVddC__SHIFT 0x0
#define PM_FUSES_1__SviLoadLineTrimVddC_MASK 0xff00
#define PM_FUSES_1__SviLoadLineTrimVddC__SHIFT 0x8
#define PM_FUSES_1__SviLoadLineVddC_MASK 0xff0000
#define PM_FUSES_1__SviLoadLineVddC__SHIFT 0x10
#define PM_FUSES_1__SviLoadLineEn_MASK 0xff000000
#define PM_FUSES_1__SviLoadLineEn__SHIFT 0x18
#define PM_FUSES_2__TDC_MAWt_MASK 0xff
#define PM_FUSES_2__TDC_MAWt__SHIFT 0x0
#define PM_FUSES_2__TDC_VDDC_ThrottleReleaseLimitPerc_MASK 0xff00
#define PM_FUSES_2__TDC_VDDC_ThrottleReleaseLimitPerc__SHIFT 0x8
#define PM_FUSES_2__TDC_VDDC_PkgLimit_MASK 0xffff0000
#define PM_FUSES_2__TDC_VDDC_PkgLimit__SHIFT 0x10
#define PM_FUSES_3__Reserved_MASK 0xff
#define PM_FUSES_3__Reserved__SHIFT 0x0
#define PM_FUSES_3__LPMLTemperatureMax_MASK 0xff00
#define PM_FUSES_3__LPMLTemperatureMax__SHIFT 0x8
#define PM_FUSES_3__LPMLTemperatureMin_MASK 0xff0000
#define PM_FUSES_3__LPMLTemperatureMin__SHIFT 0x10
#define PM_FUSES_3__TdcWaterfallCtl_MASK 0xff000000
#define PM_FUSES_3__TdcWaterfallCtl__SHIFT 0x18
#define PM_FUSES_4__LPMLTemperatureScaler_3_MASK 0xff
#define PM_FUSES_4__LPMLTemperatureScaler_3__SHIFT 0x0
#define PM_FUSES_4__LPMLTemperatureScaler_2_MASK 0xff00
#define PM_FUSES_4__LPMLTemperatureScaler_2__SHIFT 0x8
#define PM_FUSES_4__LPMLTemperatureScaler_1_MASK 0xff0000
#define PM_FUSES_4__LPMLTemperatureScaler_1__SHIFT 0x10
#define PM_FUSES_4__LPMLTemperatureScaler_0_MASK 0xff000000
#define PM_FUSES_4__LPMLTemperatureScaler_0__SHIFT 0x18
#define PM_FUSES_5__LPMLTemperatureScaler_7_MASK 0xff
#define PM_FUSES_5__LPMLTemperatureScaler_7__SHIFT 0x0
#define PM_FUSES_5__LPMLTemperatureScaler_6_MASK 0xff00
#define PM_FUSES_5__LPMLTemperatureScaler_6__SHIFT 0x8
#define PM_FUSES_5__LPMLTemperatureScaler_5_MASK 0xff0000
#define PM_FUSES_5__LPMLTemperatureScaler_5__SHIFT 0x10
#define PM_FUSES_5__LPMLTemperatureScaler_4_MASK 0xff000000
#define PM_FUSES_5__LPMLTemperatureScaler_4__SHIFT 0x18
#define PM_FUSES_6__LPMLTemperatureScaler_11_MASK 0xff
#define PM_FUSES_6__LPMLTemperatureScaler_11__SHIFT 0x0
#define PM_FUSES_6__LPMLTemperatureScaler_10_MASK 0xff00
#define PM_FUSES_6__LPMLTemperatureScaler_10__SHIFT 0x8
#define PM_FUSES_6__LPMLTemperatureScaler_9_MASK 0xff0000
#define PM_FUSES_6__LPMLTemperatureScaler_9__SHIFT 0x10
#define PM_FUSES_6__LPMLTemperatureScaler_8_MASK 0xff000000
#define PM_FUSES_6__LPMLTemperatureScaler_8__SHIFT 0x18
#define PM_FUSES_7__LPMLTemperatureScaler_15_MASK 0xff
#define PM_FUSES_7__LPMLTemperatureScaler_15__SHIFT 0x0
#define PM_FUSES_7__LPMLTemperatureScaler_14_MASK 0xff00
#define PM_FUSES_7__LPMLTemperatureScaler_14__SHIFT 0x8
#define PM_FUSES_7__LPMLTemperatureScaler_13_MASK 0xff0000
#define PM_FUSES_7__LPMLTemperatureScaler_13__SHIFT 0x10
#define PM_FUSES_7__LPMLTemperatureScaler_12_MASK 0xff000000
#define PM_FUSES_7__LPMLTemperatureScaler_12__SHIFT 0x18
#define PM_FUSES_8__FuzzyFan_ErrorRateSetDelta_MASK 0xffff
#define PM_FUSES_8__FuzzyFan_ErrorRateSetDelta__SHIFT 0x0
#define PM_FUSES_8__FuzzyFan_ErrorSetDelta_MASK 0xffff0000
#define PM_FUSES_8__FuzzyFan_ErrorSetDelta__SHIFT 0x10
#define PM_FUSES_9__Reserved6_MASK 0xffff
#define PM_FUSES_9__Reserved6__SHIFT 0x0
#define PM_FUSES_9__FuzzyFan_PwmSetDelta_MASK 0xffff0000
#define PM_FUSES_9__FuzzyFan_PwmSetDelta__SHIFT 0x10
#define PM_FUSES_10__GnbLPML_3_MASK 0xff
#define PM_FUSES_10__GnbLPML_3__SHIFT 0x0
#define PM_FUSES_10__GnbLPML_2_MASK 0xff00
#define PM_FUSES_10__GnbLPML_2__SHIFT 0x8
#define PM_FUSES_10__GnbLPML_1_MASK 0xff0000
#define PM_FUSES_10__GnbLPML_1__SHIFT 0x10
#define PM_FUSES_10__GnbLPML_0_MASK 0xff000000
#define PM_FUSES_10__GnbLPML_0__SHIFT 0x18
#define PM_FUSES_11__GnbLPML_7_MASK 0xff
#define PM_FUSES_11__GnbLPML_7__SHIFT 0x0
#define PM_FUSES_11__GnbLPML_6_MASK 0xff00
#define PM_FUSES_11__GnbLPML_6__SHIFT 0x8
#define PM_FUSES_11__GnbLPML_5_MASK 0xff0000
#define PM_FUSES_11__GnbLPML_5__SHIFT 0x10
#define PM_FUSES_11__GnbLPML_4_MASK 0xff000000
#define PM_FUSES_11__GnbLPML_4__SHIFT 0x18
#define PM_FUSES_12__GnbLPML_11_MASK 0xff
#define PM_FUSES_12__GnbLPML_11__SHIFT 0x0
#define PM_FUSES_12__GnbLPML_10_MASK 0xff00
#define PM_FUSES_12__GnbLPML_10__SHIFT 0x8
#define PM_FUSES_12__GnbLPML_9_MASK 0xff0000
#define PM_FUSES_12__GnbLPML_9__SHIFT 0x10
#define PM_FUSES_12__GnbLPML_8_MASK 0xff000000
#define PM_FUSES_12__GnbLPML_8__SHIFT 0x18
#define PM_FUSES_13__GnbLPML_15_MASK 0xff
#define PM_FUSES_13__GnbLPML_15__SHIFT 0x0
#define PM_FUSES_13__GnbLPML_14_MASK 0xff00
#define PM_FUSES_13__GnbLPML_14__SHIFT 0x8
#define PM_FUSES_13__GnbLPML_13_MASK 0xff0000
#define PM_FUSES_13__GnbLPML_13__SHIFT 0x10
#define PM_FUSES_13__GnbLPML_12_MASK 0xff000000
#define PM_FUSES_13__GnbLPML_12__SHIFT 0x18
#define PM_FUSES_14__Reserved1_1_MASK 0xff
#define PM_FUSES_14__Reserved1_1__SHIFT 0x0
#define PM_FUSES_14__Reserved1_0_MASK 0xff00
#define PM_FUSES_14__Reserved1_0__SHIFT 0x8
#define PM_FUSES_14__GnbLPMLMinVid_MASK 0xff0000
#define PM_FUSES_14__GnbLPMLMinVid__SHIFT 0x10
#define PM_FUSES_14__GnbLPMLMaxVid_MASK 0xff000000
#define PM_FUSES_14__GnbLPMLMaxVid__SHIFT 0x18
#define PM_FUSES_15__BapmVddCBaseLeakageLoSidd_MASK 0xffff
#define PM_FUSES_15__BapmVddCBaseLeakageLoSidd__SHIFT 0x0
#define PM_FUSES_15__BapmVddCBaseLeakageHiSidd_MASK 0xffff0000
#define PM_FUSES_15__BapmVddCBaseLeakageHiSidd__SHIFT 0x10
3561#define SMU_PM_STATUS_0__DATA_MASK 0xffffffff
3562#define SMU_PM_STATUS_0__DATA__SHIFT 0x0
3563#define SMU_PM_STATUS_1__DATA_MASK 0xffffffff
3564#define SMU_PM_STATUS_1__DATA__SHIFT 0x0
3565#define SMU_PM_STATUS_2__DATA_MASK 0xffffffff
3566#define SMU_PM_STATUS_2__DATA__SHIFT 0x0
3567#define SMU_PM_STATUS_3__DATA_MASK 0xffffffff
3568#define SMU_PM_STATUS_3__DATA__SHIFT 0x0
3569#define SMU_PM_STATUS_4__DATA_MASK 0xffffffff
3570#define SMU_PM_STATUS_4__DATA__SHIFT 0x0
3571#define SMU_PM_STATUS_5__DATA_MASK 0xffffffff
3572#define SMU_PM_STATUS_5__DATA__SHIFT 0x0
3573#define SMU_PM_STATUS_6__DATA_MASK 0xffffffff
3574#define SMU_PM_STATUS_6__DATA__SHIFT 0x0
3575#define SMU_PM_STATUS_7__DATA_MASK 0xffffffff
3576#define SMU_PM_STATUS_7__DATA__SHIFT 0x0
3577#define SMU_PM_STATUS_8__DATA_MASK 0xffffffff
3578#define SMU_PM_STATUS_8__DATA__SHIFT 0x0
3579#define SMU_PM_STATUS_9__DATA_MASK 0xffffffff
3580#define SMU_PM_STATUS_9__DATA__SHIFT 0x0
3581#define SMU_PM_STATUS_10__DATA_MASK 0xffffffff
3582#define SMU_PM_STATUS_10__DATA__SHIFT 0x0
3583#define SMU_PM_STATUS_11__DATA_MASK 0xffffffff
3584#define SMU_PM_STATUS_11__DATA__SHIFT 0x0
3585#define SMU_PM_STATUS_12__DATA_MASK 0xffffffff
3586#define SMU_PM_STATUS_12__DATA__SHIFT 0x0
3587#define SMU_PM_STATUS_13__DATA_MASK 0xffffffff
3588#define SMU_PM_STATUS_13__DATA__SHIFT 0x0
3589#define SMU_PM_STATUS_14__DATA_MASK 0xffffffff
3590#define SMU_PM_STATUS_14__DATA__SHIFT 0x0
3591#define SMU_PM_STATUS_15__DATA_MASK 0xffffffff
3592#define SMU_PM_STATUS_15__DATA__SHIFT 0x0
3593#define SMU_PM_STATUS_16__DATA_MASK 0xffffffff
3594#define SMU_PM_STATUS_16__DATA__SHIFT 0x0
3595#define SMU_PM_STATUS_17__DATA_MASK 0xffffffff
3596#define SMU_PM_STATUS_17__DATA__SHIFT 0x0
3597#define SMU_PM_STATUS_18__DATA_MASK 0xffffffff
3598#define SMU_PM_STATUS_18__DATA__SHIFT 0x0
3599#define SMU_PM_STATUS_19__DATA_MASK 0xffffffff
3600#define SMU_PM_STATUS_19__DATA__SHIFT 0x0
3601#define SMU_PM_STATUS_20__DATA_MASK 0xffffffff
3602#define SMU_PM_STATUS_20__DATA__SHIFT 0x0
3603#define SMU_PM_STATUS_21__DATA_MASK 0xffffffff
3604#define SMU_PM_STATUS_21__DATA__SHIFT 0x0
3605#define SMU_PM_STATUS_22__DATA_MASK 0xffffffff
3606#define SMU_PM_STATUS_22__DATA__SHIFT 0x0
3607#define SMU_PM_STATUS_23__DATA_MASK 0xffffffff
3608#define SMU_PM_STATUS_23__DATA__SHIFT 0x0
3609#define SMU_PM_STATUS_24__DATA_MASK 0xffffffff
3610#define SMU_PM_STATUS_24__DATA__SHIFT 0x0
3611#define SMU_PM_STATUS_25__DATA_MASK 0xffffffff
3612#define SMU_PM_STATUS_25__DATA__SHIFT 0x0
3613#define SMU_PM_STATUS_26__DATA_MASK 0xffffffff
3614#define SMU_PM_STATUS_26__DATA__SHIFT 0x0
3615#define SMU_PM_STATUS_27__DATA_MASK 0xffffffff
3616#define SMU_PM_STATUS_27__DATA__SHIFT 0x0
3617#define SMU_PM_STATUS_28__DATA_MASK 0xffffffff
3618#define SMU_PM_STATUS_28__DATA__SHIFT 0x0
3619#define SMU_PM_STATUS_29__DATA_MASK 0xffffffff
3620#define SMU_PM_STATUS_29__DATA__SHIFT 0x0
3621#define SMU_PM_STATUS_30__DATA_MASK 0xffffffff
3622#define SMU_PM_STATUS_30__DATA__SHIFT 0x0
3623#define SMU_PM_STATUS_31__DATA_MASK 0xffffffff
3624#define SMU_PM_STATUS_31__DATA__SHIFT 0x0
3625#define SMU_PM_STATUS_32__DATA_MASK 0xffffffff
3626#define SMU_PM_STATUS_32__DATA__SHIFT 0x0
3627#define SMU_PM_STATUS_33__DATA_MASK 0xffffffff
3628#define SMU_PM_STATUS_33__DATA__SHIFT 0x0
3629#define SMU_PM_STATUS_34__DATA_MASK 0xffffffff
3630#define SMU_PM_STATUS_34__DATA__SHIFT 0x0
3631#define SMU_PM_STATUS_35__DATA_MASK 0xffffffff
3632#define SMU_PM_STATUS_35__DATA__SHIFT 0x0
3633#define SMU_PM_STATUS_36__DATA_MASK 0xffffffff
3634#define SMU_PM_STATUS_36__DATA__SHIFT 0x0
3635#define SMU_PM_STATUS_37__DATA_MASK 0xffffffff
3636#define SMU_PM_STATUS_37__DATA__SHIFT 0x0
3637#define SMU_PM_STATUS_38__DATA_MASK 0xffffffff
3638#define SMU_PM_STATUS_38__DATA__SHIFT 0x0
3639#define SMU_PM_STATUS_39__DATA_MASK 0xffffffff
3640#define SMU_PM_STATUS_39__DATA__SHIFT 0x0
3641#define SMU_PM_STATUS_40__DATA_MASK 0xffffffff
3642#define SMU_PM_STATUS_40__DATA__SHIFT 0x0
3643#define SMU_PM_STATUS_41__DATA_MASK 0xffffffff
3644#define SMU_PM_STATUS_41__DATA__SHIFT 0x0
3645#define SMU_PM_STATUS_42__DATA_MASK 0xffffffff
3646#define SMU_PM_STATUS_42__DATA__SHIFT 0x0
3647#define SMU_PM_STATUS_43__DATA_MASK 0xffffffff
3648#define SMU_PM_STATUS_43__DATA__SHIFT 0x0
3649#define SMU_PM_STATUS_44__DATA_MASK 0xffffffff
3650#define SMU_PM_STATUS_44__DATA__SHIFT 0x0
3651#define SMU_PM_STATUS_45__DATA_MASK 0xffffffff
3652#define SMU_PM_STATUS_45__DATA__SHIFT 0x0
3653#define SMU_PM_STATUS_46__DATA_MASK 0xffffffff
3654#define SMU_PM_STATUS_46__DATA__SHIFT 0x0
3655#define SMU_PM_STATUS_47__DATA_MASK 0xffffffff
3656#define SMU_PM_STATUS_47__DATA__SHIFT 0x0
3657#define SMU_PM_STATUS_48__DATA_MASK 0xffffffff
3658#define SMU_PM_STATUS_48__DATA__SHIFT 0x0
3659#define SMU_PM_STATUS_49__DATA_MASK 0xffffffff
3660#define SMU_PM_STATUS_49__DATA__SHIFT 0x0
3661#define SMU_PM_STATUS_50__DATA_MASK 0xffffffff
3662#define SMU_PM_STATUS_50__DATA__SHIFT 0x0
3663#define SMU_PM_STATUS_51__DATA_MASK 0xffffffff
3664#define SMU_PM_STATUS_51__DATA__SHIFT 0x0
3665#define SMU_PM_STATUS_52__DATA_MASK 0xffffffff
3666#define SMU_PM_STATUS_52__DATA__SHIFT 0x0
3667#define SMU_PM_STATUS_53__DATA_MASK 0xffffffff
3668#define SMU_PM_STATUS_53__DATA__SHIFT 0x0
3669#define SMU_PM_STATUS_54__DATA_MASK 0xffffffff
3670#define SMU_PM_STATUS_54__DATA__SHIFT 0x0
3671#define SMU_PM_STATUS_55__DATA_MASK 0xffffffff
3672#define SMU_PM_STATUS_55__DATA__SHIFT 0x0
3673#define SMU_PM_STATUS_56__DATA_MASK 0xffffffff
3674#define SMU_PM_STATUS_56__DATA__SHIFT 0x0
3675#define SMU_PM_STATUS_57__DATA_MASK 0xffffffff
3676#define SMU_PM_STATUS_57__DATA__SHIFT 0x0
3677#define SMU_PM_STATUS_58__DATA_MASK 0xffffffff
3678#define SMU_PM_STATUS_58__DATA__SHIFT 0x0
3679#define SMU_PM_STATUS_59__DATA_MASK 0xffffffff
3680#define SMU_PM_STATUS_59__DATA__SHIFT 0x0
3681#define SMU_PM_STATUS_60__DATA_MASK 0xffffffff
3682#define SMU_PM_STATUS_60__DATA__SHIFT 0x0
3683#define SMU_PM_STATUS_61__DATA_MASK 0xffffffff
3684#define SMU_PM_STATUS_61__DATA__SHIFT 0x0
3685#define SMU_PM_STATUS_62__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_62__DATA__SHIFT 0x0
#define SMU_PM_STATUS_63__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_63__DATA__SHIFT 0x0
#define SMU_PM_STATUS_64__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_64__DATA__SHIFT 0x0
#define SMU_PM_STATUS_65__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_65__DATA__SHIFT 0x0
#define SMU_PM_STATUS_66__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_66__DATA__SHIFT 0x0
#define SMU_PM_STATUS_67__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_67__DATA__SHIFT 0x0
#define SMU_PM_STATUS_68__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_68__DATA__SHIFT 0x0
#define SMU_PM_STATUS_69__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_69__DATA__SHIFT 0x0
#define SMU_PM_STATUS_70__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_70__DATA__SHIFT 0x0
#define SMU_PM_STATUS_71__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_71__DATA__SHIFT 0x0
#define SMU_PM_STATUS_72__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_72__DATA__SHIFT 0x0
#define SMU_PM_STATUS_73__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_73__DATA__SHIFT 0x0
#define SMU_PM_STATUS_74__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_74__DATA__SHIFT 0x0
#define SMU_PM_STATUS_75__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_75__DATA__SHIFT 0x0
#define SMU_PM_STATUS_76__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_76__DATA__SHIFT 0x0
#define SMU_PM_STATUS_77__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_77__DATA__SHIFT 0x0
#define SMU_PM_STATUS_78__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_78__DATA__SHIFT 0x0
#define SMU_PM_STATUS_79__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_79__DATA__SHIFT 0x0
#define SMU_PM_STATUS_80__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_80__DATA__SHIFT 0x0
#define SMU_PM_STATUS_81__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_81__DATA__SHIFT 0x0
#define SMU_PM_STATUS_82__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_82__DATA__SHIFT 0x0
#define SMU_PM_STATUS_83__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_83__DATA__SHIFT 0x0
#define SMU_PM_STATUS_84__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_84__DATA__SHIFT 0x0
#define SMU_PM_STATUS_85__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_85__DATA__SHIFT 0x0
#define SMU_PM_STATUS_86__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_86__DATA__SHIFT 0x0
#define SMU_PM_STATUS_87__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_87__DATA__SHIFT 0x0
#define SMU_PM_STATUS_88__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_88__DATA__SHIFT 0x0
#define SMU_PM_STATUS_89__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_89__DATA__SHIFT 0x0
#define SMU_PM_STATUS_90__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_90__DATA__SHIFT 0x0
#define SMU_PM_STATUS_91__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_91__DATA__SHIFT 0x0
#define SMU_PM_STATUS_92__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_92__DATA__SHIFT 0x0
#define SMU_PM_STATUS_93__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_93__DATA__SHIFT 0x0
#define SMU_PM_STATUS_94__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_94__DATA__SHIFT 0x0
#define SMU_PM_STATUS_95__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_95__DATA__SHIFT 0x0
#define SMU_PM_STATUS_96__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_96__DATA__SHIFT 0x0
#define SMU_PM_STATUS_97__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_97__DATA__SHIFT 0x0
#define SMU_PM_STATUS_98__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_98__DATA__SHIFT 0x0
#define SMU_PM_STATUS_99__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_99__DATA__SHIFT 0x0
#define SMU_PM_STATUS_100__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_100__DATA__SHIFT 0x0
#define SMU_PM_STATUS_101__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_101__DATA__SHIFT 0x0
#define SMU_PM_STATUS_102__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_102__DATA__SHIFT 0x0
#define SMU_PM_STATUS_103__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_103__DATA__SHIFT 0x0
#define SMU_PM_STATUS_104__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_104__DATA__SHIFT 0x0
#define SMU_PM_STATUS_105__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_105__DATA__SHIFT 0x0
#define SMU_PM_STATUS_106__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_106__DATA__SHIFT 0x0
#define SMU_PM_STATUS_107__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_107__DATA__SHIFT 0x0
#define SMU_PM_STATUS_108__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_108__DATA__SHIFT 0x0
#define SMU_PM_STATUS_109__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_109__DATA__SHIFT 0x0
#define SMU_PM_STATUS_110__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_110__DATA__SHIFT 0x0
#define SMU_PM_STATUS_111__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_111__DATA__SHIFT 0x0
#define SMU_PM_STATUS_112__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_112__DATA__SHIFT 0x0
#define SMU_PM_STATUS_113__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_113__DATA__SHIFT 0x0
#define SMU_PM_STATUS_114__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_114__DATA__SHIFT 0x0
#define SMU_PM_STATUS_115__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_115__DATA__SHIFT 0x0
#define SMU_PM_STATUS_116__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_116__DATA__SHIFT 0x0
#define SMU_PM_STATUS_117__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_117__DATA__SHIFT 0x0
#define SMU_PM_STATUS_118__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_118__DATA__SHIFT 0x0
#define SMU_PM_STATUS_119__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_119__DATA__SHIFT 0x0
#define SMU_PM_STATUS_120__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_120__DATA__SHIFT 0x0
#define SMU_PM_STATUS_121__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_121__DATA__SHIFT 0x0
#define SMU_PM_STATUS_122__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_122__DATA__SHIFT 0x0
#define SMU_PM_STATUS_123__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_123__DATA__SHIFT 0x0
#define SMU_PM_STATUS_124__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_124__DATA__SHIFT 0x0
#define SMU_PM_STATUS_125__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_125__DATA__SHIFT 0x0
#define SMU_PM_STATUS_126__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_126__DATA__SHIFT 0x0
#define SMU_PM_STATUS_127__DATA_MASK 0xffffffff
#define SMU_PM_STATUS_127__DATA__SHIFT 0x0
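/*
 * Illustrative sketch, not part of the original header: every field in
 * this file follows the <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT
 * naming convention, so fields can be read and updated with a generic
 * helper pair. The macro names below (FIELD_GET_EX/FIELD_SET_EX) are
 * hypothetical illustrations of that convention, not definitions that
 * ship in this header.
 */
#define FIELD_GET_EX(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define FIELD_SET_EX(val, reg, field, fv) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))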
#define CG_THERMAL_INT_ENA__THERM_INTH_SET_MASK 0x1
#define CG_THERMAL_INT_ENA__THERM_INTH_SET__SHIFT 0x0
#define CG_THERMAL_INT_ENA__THERM_INTL_SET_MASK 0x2
#define CG_THERMAL_INT_ENA__THERM_INTL_SET__SHIFT 0x1
#define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET_MASK 0x4
#define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET__SHIFT 0x2
#define CG_THERMAL_INT_ENA__THERM_INTH_CLR_MASK 0x8
#define CG_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT 0x3
#define CG_THERMAL_INT_ENA__THERM_INTL_CLR_MASK 0x10
#define CG_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT 0x4
#define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR_MASK 0x20
#define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT 0x5
#define CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK 0xff
#define CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT 0x0
#define CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK 0xff00
#define CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT 0x8
#define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD_MASK 0xff0000
#define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD__SHIFT 0x10
#define CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK 0x1000000
#define CG_THERMAL_INT_CTRL__THERM_INTH_MASK__SHIFT 0x18
#define CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK 0x2000000
#define CG_THERMAL_INT_CTRL__THERM_INTL_MASK__SHIFT 0x19
#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK 0x4000000
#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK__SHIFT 0x1a
#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK_MASK 0x8000000
#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK__SHIFT 0x1b
#define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA_MASK 0x10000000
#define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA__SHIFT 0x1c
#define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT_MASK 0x1
#define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT__SHIFT 0x0
#define CG_THERMAL_INT_STATUS__THERM_INTL_DETECT_MASK 0x2
#define CG_THERMAL_INT_STATUS__THERM_INTL_DETECT__SHIFT 0x1
#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT_MASK 0x4
#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT__SHIFT 0x2
#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT_MASK 0x8
#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT__SHIFT 0x3
#define CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK 0x7
#define CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT 0x0
#define CG_THERMAL_CTRL__THERM_INC_CLK_MASK 0x8
#define CG_THERMAL_CTRL__THERM_INC_CLK__SHIFT 0x3
#define CG_THERMAL_CTRL__SPARE_MASK 0x3ff0
#define CG_THERMAL_CTRL__SPARE__SHIFT 0x4
#define CG_THERMAL_CTRL__DIG_THERM_DPM_MASK 0x3fc000
#define CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT 0xe
#define CG_THERMAL_CTRL__RESERVED_MASK 0x1c00000
#define CG_THERMAL_CTRL__RESERVED__SHIFT 0x16
#define CG_THERMAL_CTRL__CTF_PAD_POLARITY_MASK 0x2000000
#define CG_THERMAL_CTRL__CTF_PAD_POLARITY__SHIFT 0x19
#define CG_THERMAL_CTRL__CTF_PAD_EN_MASK 0x4000000
#define CG_THERMAL_CTRL__CTF_PAD_EN__SHIFT 0x1a
#define CG_THERMAL_STATUS__SPARE_MASK 0x1ff
#define CG_THERMAL_STATUS__SPARE__SHIFT 0x0
#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK 0x1fe00
#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT 0x9
#define CG_THERMAL_STATUS__THERM_ALERT_MASK 0x20000
#define CG_THERMAL_STATUS__THERM_ALERT__SHIFT 0x11
#define CG_THERMAL_STATUS__GEN_STATUS_MASK 0x3c0000
#define CG_THERMAL_STATUS__GEN_STATUS__SHIFT 0x12
#define CG_THERMAL_INT__DIG_THERM_CTF_MASK 0xff
#define CG_THERMAL_INT__DIG_THERM_CTF__SHIFT 0x0
#define CG_THERMAL_INT__DIG_THERM_INTH_MASK 0xff00
#define CG_THERMAL_INT__DIG_THERM_INTH__SHIFT 0x8
#define CG_THERMAL_INT__DIG_THERM_INTL_MASK 0xff0000
#define CG_THERMAL_INT__DIG_THERM_INTL__SHIFT 0x10
#define CG_THERMAL_INT__THERM_INT_MASK_MASK 0xf000000
#define CG_THERMAL_INT__THERM_INT_MASK__SHIFT 0x18
#define CG_MULT_THERMAL_CTRL__TS_FILTER_MASK 0xf
#define CG_MULT_THERMAL_CTRL__TS_FILTER__SHIFT 0x0
#define CG_MULT_THERMAL_CTRL__UNUSED_MASK 0x1f0
#define CG_MULT_THERMAL_CTRL__UNUSED__SHIFT 0x4
#define CG_MULT_THERMAL_CTRL__THERMAL_RANGE_RST_MASK 0x200
#define CG_MULT_THERMAL_CTRL__THERMAL_RANGE_RST__SHIFT 0x9
#define CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK 0xff00000
#define CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT 0x14
#define CG_MULT_THERMAL_CTRL__THM_READY_CLEAR_MASK 0x10000000
#define CG_MULT_THERMAL_CTRL__THM_READY_CLEAR__SHIFT 0x1c
#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x1ff
#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x0
#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x3fe00
#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x9
#define THM_TMON2_CTRL__POWER_DOWN_MASK 0x1
#define THM_TMON2_CTRL__POWER_DOWN__SHIFT 0x0
#define THM_TMON2_CTRL__BGADJ_MASK 0x1fe
#define THM_TMON2_CTRL__BGADJ__SHIFT 0x1
#define THM_TMON2_CTRL__BGADJ_MODE_MASK 0x200
#define THM_TMON2_CTRL__BGADJ_MODE__SHIFT 0x9
#define THM_TMON2_CTRL__TMON_PAUSE_MASK 0x400
#define THM_TMON2_CTRL__TMON_PAUSE__SHIFT 0xa
#define THM_TMON2_CTRL__INT_MEAS_EN_MASK 0x800
#define THM_TMON2_CTRL__INT_MEAS_EN__SHIFT 0xb
#define THM_TMON2_CTRL__DEBUG_MODE_MASK 0x1000
#define THM_TMON2_CTRL__DEBUG_MODE__SHIFT 0xc
#define THM_TMON2_CTRL__EN_CFG_SERDES_MASK 0x2000
#define THM_TMON2_CTRL__EN_CFG_SERDES__SHIFT 0xd
#define THM_TMON2_CTRL2__RDIL_PRESENT_MASK 0xffff
#define THM_TMON2_CTRL2__RDIL_PRESENT__SHIFT 0x0
#define THM_TMON2_CTRL2__RDIR_PRESENT_MASK 0xffff0000
#define THM_TMON2_CTRL2__RDIR_PRESENT__SHIFT 0x10
#define THM_TMON2_CSR_WR__CSR_WRITE_MASK 0x1
#define THM_TMON2_CSR_WR__CSR_WRITE__SHIFT 0x0
#define THM_TMON2_CSR_WR__CSR_READ_MASK 0x2
#define THM_TMON2_CSR_WR__CSR_READ__SHIFT 0x1
#define THM_TMON2_CSR_WR__CSR_ADDR_MASK 0xffc
#define THM_TMON2_CSR_WR__CSR_ADDR__SHIFT 0x2
#define THM_TMON2_CSR_WR__WRITE_DATA_MASK 0xfff000
#define THM_TMON2_CSR_WR__WRITE_DATA__SHIFT 0xc
#define THM_TMON2_CSR_WR__SPARE_MASK 0x1000000
#define THM_TMON2_CSR_WR__SPARE__SHIFT 0x18
#define THM_TMON2_CSR_RD__READ_DATA_MASK 0xfff
#define THM_TMON2_CSR_RD__READ_DATA__SHIFT 0x0
#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0xff
#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x0
#define CG_FDO_CTRL0__FAN_SPINUP_DUTY_MASK 0xff00
#define CG_FDO_CTRL0__FAN_SPINUP_DUTY__SHIFT 0x8
#define CG_FDO_CTRL0__FDO_PWM_MANUAL_MASK 0x10000
#define CG_FDO_CTRL0__FDO_PWM_MANUAL__SHIFT 0x10
#define CG_FDO_CTRL0__FDO_PWM_HYSTER_MASK 0x7e0000
#define CG_FDO_CTRL0__FDO_PWM_HYSTER__SHIFT 0x11
#define CG_FDO_CTRL0__FDO_PWM_RAMP_EN_MASK 0x800000
#define CG_FDO_CTRL0__FDO_PWM_RAMP_EN__SHIFT 0x17
#define CG_FDO_CTRL0__FDO_PWM_RAMP_MASK 0xff000000
#define CG_FDO_CTRL0__FDO_PWM_RAMP__SHIFT 0x18
#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0xff
#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x0
#define CG_FDO_CTRL1__FMIN_DUTY_MASK 0xff00
#define CG_FDO_CTRL1__FMIN_DUTY__SHIFT 0x8
#define CG_FDO_CTRL1__M_MASK 0xff0000
#define CG_FDO_CTRL1__M__SHIFT 0x10
#define CG_FDO_CTRL1__RESERVED_MASK 0x3f000000
#define CG_FDO_CTRL1__RESERVED__SHIFT 0x18
#define CG_FDO_CTRL1__FDO_PWRDNB_MASK 0x40000000
#define CG_FDO_CTRL1__FDO_PWRDNB__SHIFT 0x1e
#define CG_FDO_CTRL2__TMIN_MASK 0xff
#define CG_FDO_CTRL2__TMIN__SHIFT 0x0
#define CG_FDO_CTRL2__FAN_SPINUP_TIME_MASK 0x700
#define CG_FDO_CTRL2__FAN_SPINUP_TIME__SHIFT 0x8
#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x3800
#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0xb
#define CG_FDO_CTRL2__TMIN_HYSTER_MASK 0x1c000
#define CG_FDO_CTRL2__TMIN_HYSTER__SHIFT 0xe
#define CG_FDO_CTRL2__TMAX_MASK 0x1fe0000
#define CG_FDO_CTRL2__TMAX__SHIFT 0x11
#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK 0xfe000000
#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT 0x19
#define CG_TACH_CTRL__EDGE_PER_REV_MASK 0x7
#define CG_TACH_CTRL__EDGE_PER_REV__SHIFT 0x0
#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xfffffff8
#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x3
#define CG_TACH_STATUS__TACH_PERIOD_MASK 0xffffffff
#define CG_TACH_STATUS__TACH_PERIOD__SHIFT 0x0
#define CC_THM_STRAPS0__TMON0_BGADJ_MASK 0x1fe
#define CC_THM_STRAPS0__TMON0_BGADJ__SHIFT 0x1
#define CC_THM_STRAPS0__TMON1_BGADJ_MASK 0x1fe00
#define CC_THM_STRAPS0__TMON1_BGADJ__SHIFT 0x9
#define CC_THM_STRAPS0__TMON_CMON_FUSE_SEL_MASK 0x20000
#define CC_THM_STRAPS0__TMON_CMON_FUSE_SEL__SHIFT 0x11
#define CC_THM_STRAPS0__NUM_ACQ_MASK 0x1c0000
#define CC_THM_STRAPS0__NUM_ACQ__SHIFT 0x12
#define CC_THM_STRAPS0__TMON_CLK_SEL_MASK 0xe00000
#define CC_THM_STRAPS0__TMON_CLK_SEL__SHIFT 0x15
#define CC_THM_STRAPS0__TMON_CONFIG_SOURCE_MASK 0x1000000
#define CC_THM_STRAPS0__TMON_CONFIG_SOURCE__SHIFT 0x18
#define CC_THM_STRAPS0__CTF_DISABLE_MASK 0x2000000
#define CC_THM_STRAPS0__CTF_DISABLE__SHIFT 0x19
#define CC_THM_STRAPS0__TMON0_DISABLE_MASK 0x4000000
#define CC_THM_STRAPS0__TMON0_DISABLE__SHIFT 0x1a
#define CC_THM_STRAPS0__TMON1_DISABLE_MASK 0x8000000
#define CC_THM_STRAPS0__TMON1_DISABLE__SHIFT 0x1b
#define CC_THM_STRAPS0__TMON2_DISABLE_MASK 0x10000000
#define CC_THM_STRAPS0__TMON2_DISABLE__SHIFT 0x1c
#define CC_THM_STRAPS0__TMON3_DISABLE_MASK 0x20000000
#define CC_THM_STRAPS0__TMON3_DISABLE__SHIFT 0x1d
#define CC_THM_STRAPS0__UNUSED_MASK 0x80000000
#define CC_THM_STRAPS0__UNUSED__SHIFT 0x1f
#define THM_TMON0_RDIL0_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL0_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL0_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL0_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL0_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL0_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIL1_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL1_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL1_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL1_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL1_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL1_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIL2_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL2_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL2_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL2_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL2_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL2_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIL3_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL3_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL3_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL3_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL3_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL3_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIL4_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL4_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL4_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL4_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL4_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL4_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIL5_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL5_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL5_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL5_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL5_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL5_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIL6_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL6_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL6_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL6_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL6_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL6_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIL7_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL7_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL7_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL7_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL7_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL7_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIL8_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL8_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL8_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL8_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL8_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL8_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIL9_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL9_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL9_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL9_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL9_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL9_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIL10_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL10_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL10_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL10_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL10_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL10_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIL11_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL11_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL11_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL11_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL11_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL11_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIL12_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL12_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL12_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL12_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL12_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL12_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIL13_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL13_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL13_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL13_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL13_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL13_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIL14_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL14_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL14_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL14_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL14_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL14_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIL15_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIL15_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIL15_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIL15_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIL15_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIL15_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR0_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR0_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR0_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR0_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR0_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR0_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR1_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR1_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR1_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR1_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR1_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR1_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR2_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR2_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR2_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR2_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR2_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR2_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR3_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR3_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR3_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR3_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR3_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR3_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR4_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR4_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR4_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR4_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR4_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR4_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR5_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR5_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR5_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR5_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR5_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR5_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR6_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR6_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR6_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR6_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR6_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR6_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR7_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR7_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR7_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR7_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR7_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR7_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR8_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR8_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR8_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR8_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR8_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR8_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR9_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR9_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR9_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR9_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR9_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR9_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR10_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR10_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR10_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR10_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR10_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR10_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR11_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR11_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR11_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR11_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR11_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR11_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR12_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR12_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR12_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR12_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR12_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR12_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR13_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR13_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR13_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR13_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR13_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR13_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR14_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR14_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR14_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR14_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR14_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR14_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_RDIR15_DATA__Z_MASK 0x7ff
#define THM_TMON0_RDIR15_DATA__Z__SHIFT 0x0
#define THM_TMON0_RDIR15_DATA__VALID_MASK 0x800
#define THM_TMON0_RDIR15_DATA__VALID__SHIFT 0xb
#define THM_TMON0_RDIR15_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_RDIR15_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL0_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL0_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL0_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL0_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL0_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL0_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL1_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL1_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL1_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL1_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL1_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL1_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL2_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL2_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL2_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL2_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL2_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL2_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL3_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL3_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL3_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL3_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL3_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL3_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL4_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL4_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL4_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL4_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL4_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL4_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL5_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL5_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL5_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL5_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL5_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL5_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL6_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL6_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL6_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL6_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL6_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL6_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL7_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL7_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL7_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL7_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL7_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL7_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL8_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL8_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL8_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL8_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL8_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL8_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL9_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL9_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL9_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL9_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL9_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL9_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL10_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL10_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL10_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL10_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL10_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL10_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL11_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL11_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL11_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL11_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL11_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL11_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL12_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL12_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL12_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL12_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL12_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL12_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL13_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL13_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL13_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL13_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL13_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL13_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL14_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL14_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL14_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL14_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL14_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL14_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIL15_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIL15_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIL15_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIL15_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIL15_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIL15_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR0_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR0_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR0_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR0_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR0_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR0_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR1_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR1_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR1_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR1_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR1_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR1_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR2_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR2_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR2_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR2_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR2_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR2_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR3_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR3_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR3_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR3_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR3_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR3_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR4_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR4_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR4_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR4_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR4_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR4_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR5_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR5_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR5_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR5_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR5_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR5_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR6_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR6_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR6_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR6_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR6_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR6_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR7_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR7_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR7_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR7_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR7_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR7_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR8_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR8_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR8_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR8_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR8_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR8_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR9_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR9_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR9_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR9_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR9_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR9_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR10_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR10_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR10_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR10_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR10_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR10_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR11_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR11_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR11_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR11_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR11_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR11_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR12_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR12_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR12_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR12_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR12_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR12_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR13_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR13_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR13_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR13_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR13_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR13_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR14_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR14_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR14_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR14_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR14_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR14_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_RDIR15_DATA__Z_MASK 0x7ff
#define THM_TMON1_RDIR15_DATA__Z__SHIFT 0x0
#define THM_TMON1_RDIR15_DATA__VALID_MASK 0x800
#define THM_TMON1_RDIR15_DATA__VALID__SHIFT 0xb
#define THM_TMON1_RDIR15_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_RDIR15_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL0_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL0_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL0_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL0_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL0_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL0_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL1_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL1_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL1_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL1_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL1_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL1_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL2_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL2_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL2_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL2_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL2_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL2_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL3_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL3_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL3_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL3_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL3_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL3_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL4_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL4_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL4_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL4_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL4_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL4_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL5_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL5_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL5_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL5_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL5_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL5_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL6_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL6_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL6_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL6_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL6_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL6_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL7_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL7_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL7_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL7_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL7_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL7_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL8_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL8_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL8_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL8_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL8_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL8_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL9_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL9_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL9_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL9_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL9_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL9_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL10_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL10_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL10_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL10_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL10_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL10_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL11_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL11_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL11_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL11_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL11_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL11_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL12_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL12_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL12_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL12_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL12_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL12_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL13_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL13_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL13_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL13_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL13_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL13_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL14_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL14_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL14_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL14_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL14_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL14_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIL15_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIL15_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIL15_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIL15_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIL15_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIL15_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR0_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR0_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR0_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR0_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR0_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR0_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR1_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR1_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR1_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR1_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR1_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR1_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR2_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR2_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR2_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR2_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR2_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR2_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR3_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR3_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR3_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR3_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR3_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR3_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR4_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR4_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR4_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR4_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR4_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR4_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR5_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR5_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR5_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR5_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR5_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR5_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR6_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR6_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR6_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR6_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR6_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR6_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR7_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR7_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR7_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR7_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR7_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR7_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR8_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR8_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR8_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR8_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR8_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR8_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR9_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR9_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR9_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR9_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR9_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR9_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR10_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR10_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR10_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR10_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR10_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR10_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR11_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR11_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR11_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR11_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR11_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR11_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR12_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR12_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR12_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR12_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR12_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR12_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR13_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR13_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR13_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR13_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR13_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR13_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR14_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR14_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR14_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR14_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR14_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR14_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_RDIR15_DATA__Z_MASK 0x7ff
#define THM_TMON2_RDIR15_DATA__Z__SHIFT 0x0
#define THM_TMON2_RDIR15_DATA__VALID_MASK 0x800
#define THM_TMON2_RDIR15_DATA__VALID__SHIFT 0xb
#define THM_TMON2_RDIR15_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_RDIR15_DATA__TEMP__SHIFT 0xc
#define THM_TMON0_INT_DATA__Z_MASK 0x7ff
#define THM_TMON0_INT_DATA__Z__SHIFT 0x0
#define THM_TMON0_INT_DATA__VALID_MASK 0x800
#define THM_TMON0_INT_DATA__VALID__SHIFT 0xb
#define THM_TMON0_INT_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_INT_DATA__TEMP__SHIFT 0xc
#define THM_TMON1_INT_DATA__Z_MASK 0x7ff
#define THM_TMON1_INT_DATA__Z__SHIFT 0x0
#define THM_TMON1_INT_DATA__VALID_MASK 0x800
#define THM_TMON1_INT_DATA__VALID__SHIFT 0xb
#define THM_TMON1_INT_DATA__TEMP_MASK 0xfff000
#define THM_TMON1_INT_DATA__TEMP__SHIFT 0xc
#define THM_TMON2_INT_DATA__Z_MASK 0x7ff
#define THM_TMON2_INT_DATA__Z__SHIFT 0x0
#define THM_TMON2_INT_DATA__VALID_MASK 0x800
#define THM_TMON2_INT_DATA__VALID__SHIFT 0xb
#define THM_TMON2_INT_DATA__TEMP_MASK 0xfff000
#define THM_TMON2_INT_DATA__TEMP__SHIFT 0xc
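/*
 * Illustrative sketch, an assumption rather than part of this header: as
 * the masks above encode, a TMON sample word packs Z in bits [10:0], a
 * VALID flag in bit 11 and TEMP in bits [23:12]. A reader would
 * typically check VALID before trusting TEMP, for example:
 */
static inline int tmon0_int_sample_temp(unsigned int data,
					unsigned int *temp)
{
	if (!((data & THM_TMON0_INT_DATA__VALID_MASK) >>
	      THM_TMON0_INT_DATA__VALID__SHIFT))
		return -1;	/* no valid measurement latched yet */
	*temp = (data & THM_TMON0_INT_DATA__TEMP_MASK) >>
		THM_TMON0_INT_DATA__TEMP__SHIFT;
	return 0;
}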
#define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x1f
#define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x0
#define THM_TMON0_DEBUG__DEBUG_Z_MASK 0xffe0
#define THM_TMON0_DEBUG__DEBUG_Z__SHIFT 0x5
#define THM_TMON1_DEBUG__DEBUG_RDI_MASK 0x1f
#define THM_TMON1_DEBUG__DEBUG_RDI__SHIFT 0x0
#define THM_TMON1_DEBUG__DEBUG_Z_MASK 0xffe0
#define THM_TMON1_DEBUG__DEBUG_Z__SHIFT 0x5
#define THM_TMON2_DEBUG__DEBUG_RDI_MASK 0x1f
#define THM_TMON2_DEBUG__DEBUG_RDI__SHIFT 0x0
#define THM_TMON2_DEBUG__DEBUG_Z_MASK 0xffe0
#define THM_TMON2_DEBUG__DEBUG_Z__SHIFT 0x5
#define THM_TMON0_STATUS__CURRENT_RDI_MASK 0x1f
#define THM_TMON0_STATUS__CURRENT_RDI__SHIFT 0x0
#define THM_TMON0_STATUS__MEAS_DONE_MASK 0x20
#define THM_TMON0_STATUS__MEAS_DONE__SHIFT 0x5
#define THM_TMON1_STATUS__CURRENT_RDI_MASK 0x1f
#define THM_TMON1_STATUS__CURRENT_RDI__SHIFT 0x0
#define THM_TMON1_STATUS__MEAS_DONE_MASK 0x20
#define THM_TMON1_STATUS__MEAS_DONE__SHIFT 0x5
#define THM_TMON2_STATUS__CURRENT_RDI_MASK 0x1f
#define THM_TMON2_STATUS__CURRENT_RDI__SHIFT 0x0
#define THM_TMON2_STATUS__MEAS_DONE_MASK 0x20
#define THM_TMON2_STATUS__MEAS_DONE__SHIFT 0x5
#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK 0x1
#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN__SHIFT 0x0
#define GENERAL_PWRMGT__STATIC_PM_EN_MASK 0x2
#define GENERAL_PWRMGT__STATIC_PM_EN__SHIFT 0x1
#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK 0x4
#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS__SHIFT 0x2
#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE_MASK 0x8
#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE__SHIFT 0x3
#define GENERAL_PWRMGT__SW_SMIO_INDEX_MASK 0x40
#define GENERAL_PWRMGT__SW_SMIO_INDEX__SHIFT 0x6
#define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI_MASK 0x100
#define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI__SHIFT 0x8
#define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI_MASK 0x200
#define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI__SHIFT 0x9
#define GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK 0x400
#define GENERAL_PWRMGT__VOLT_PWRMGT_EN__SHIFT 0xa
#define GENERAL_PWRMGT__SPARE11_MASK 0x800
#define GENERAL_PWRMGT__SPARE11__SHIFT 0xb
#define GENERAL_PWRMGT__GPU_COUNTER_ACPI_MASK 0x4000
#define GENERAL_PWRMGT__GPU_COUNTER_ACPI__SHIFT 0xe
#define GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK 0x8000
#define GENERAL_PWRMGT__GPU_COUNTER_CLK__SHIFT 0xf
#define GENERAL_PWRMGT__GPU_COUNTER_OFF_MASK 0x10000
#define GENERAL_PWRMGT__GPU_COUNTER_OFF__SHIFT 0x10
#define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF_MASK 0x20000
#define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF__SHIFT 0x11
#define GENERAL_PWRMGT__SPARE18_MASK 0x40000
#define GENERAL_PWRMGT__SPARE18__SHIFT 0x12
#define GENERAL_PWRMGT__ACPI_D3_VID_MASK 0x180000
#define GENERAL_PWRMGT__ACPI_D3_VID__SHIFT 0x13
#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK 0x800000
#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN__SHIFT 0x17
#define GENERAL_PWRMGT__SPARE27_MASK 0x8000000
#define GENERAL_PWRMGT__SPARE27__SHIFT 0x1b
#define GENERAL_PWRMGT__SPARE_MASK 0xf0000000
#define GENERAL_PWRMGT__SPARE__SHIFT 0x1c
#define CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK 0x3
#define CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT 0x0
#define CNB_PWRMGT_CNTL__GNB_SLOW_MASK 0x4
#define CNB_PWRMGT_CNTL__GNB_SLOW__SHIFT 0x2
#define CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK 0x8
#define CNB_PWRMGT_CNTL__FORCE_NB_PS1__SHIFT 0x3
#define CNB_PWRMGT_CNTL__DPM_ENABLED_MASK 0x10
#define CNB_PWRMGT_CNTL__DPM_ENABLED__SHIFT 0x4
#define CNB_PWRMGT_CNTL__SPARE_MASK 0xffffffe0
#define CNB_PWRMGT_CNTL__SPARE__SHIFT 0x5
#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK 0x1
#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF__SHIFT 0x0
#define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK 0x10
#define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT__SHIFT 0x4
#define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK 0x20
#define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT__SHIFT 0x5
#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN_MASK 0x4000
#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN__SHIFT 0xe
#define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP_MASK 0x8000
#define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP__SHIFT 0xf
#define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER_MASK 0x1f0000
#define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER__SHIFT 0x10
#define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK 0x200000
#define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN__SHIFT 0x15
#define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE_MASK 0xf
#define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE__SHIFT 0x0
#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_MASK 0xf0
#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE__SHIFT 0x4
#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK 0xf00
#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT 0x8
#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX_MASK 0xf000
#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX__SHIFT 0xc
#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK 0x1f0000
#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT 0x10
#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX_MASK 0x3e00000
#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX__SHIFT 0x15
#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX_MASK 0x1c000000
#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX__SHIFT 0x1a
#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX_MASK 0xe0000000
#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX__SHIFT 0x1d
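/*
 * Illustrative sketch (assumption, not from the original header):
 * TARGET_AND_CURRENT_PROFILE_INDEX packs the DPM target/current state
 * plus per-clock-domain level indices into one word; each index is
 * recovered with its mask/shift pair, e.g. the current SCLK level:
 */
static inline unsigned int profile_curr_sclk_index(unsigned int reg)
{
	return (reg & TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK)
		>> TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
}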
#define PWR_PCC_CONTROL__PCC_POLARITY_MASK 0x1
#define PWR_PCC_CONTROL__PCC_POLARITY__SHIFT 0x0
#define PWR_PCC_GPIO_SELECT__GPIO_MASK 0xffffffff
#define PWR_PCC_GPIO_SELECT__GPIO__SHIFT 0x0
#define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
#define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
#define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
#define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
#define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
#define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
#define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
#define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
#define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
#define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
#define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
#define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
#define CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
#define CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
#define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
#define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
#define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
#define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
#define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
#define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
#define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
#define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
#define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
#define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
#define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
#define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
#define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
#define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
#define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
#define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
#define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
#define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
#define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
#define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
#define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
#define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
#define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
#define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
#define CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
#define CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
#define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
#define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
#define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
#define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
#define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
#define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
#define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
#define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
#define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
#define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
#define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
#define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
#define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
#define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
#define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
#define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
#define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
#define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
#define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
#define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
#define CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
#define CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
#define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
#define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
#define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
#define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
#define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
#define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
#define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
#define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
#define CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
#define CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
#define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
#define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
#define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
#define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
#define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
#define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
#define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
#define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
#define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
#define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
#define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
#define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
#define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
#define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
#define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
#define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
#define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
#define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
#define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
#define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
#define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
#define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
#define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
#define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
#define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
#define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
#define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
#define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
#define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
#define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
#define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
#define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
#define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
#define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
#define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
#define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
#define CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
#define CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
#define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
#define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
#define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
#define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
#define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
#define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
#define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
#define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
#define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
#define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
#define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
#define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
#define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
#define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
#define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
#define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
#define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
#define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
#define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
#define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
#define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
#define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
#define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
#define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
#define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
#define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
#define CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
#define CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
#define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
#define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
#define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
#define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
#define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
#define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
#define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
#define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
#define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
#define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
#define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
#define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
#define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
#define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
#define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
#define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
#define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
#define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
#define CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
#define CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
#define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
#define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
#define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
#define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
#define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
4852#define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
4853#define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
4854#define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
4855#define CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
4856#define CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
4857#define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
4858#define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
4859#define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
4860#define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
4861#define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
4862#define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
4863#define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
4864#define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
4865#define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
4866#define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
4867#define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
4868#define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
4869#define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
4870#define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
4871#define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
4872#define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
4873#define CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
4874#define CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
4875#define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
4876#define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
4877#define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
4878#define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
4879#define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
4880#define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
4881#define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
4882#define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
4883#define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
4884#define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
4885#define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
4886#define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
4887#define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
4888#define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
4889#define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
4890#define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
4891#define CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
4892#define CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
4893#define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
4894#define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
4895#define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
4896#define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
4897#define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
4898#define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
4899#define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
4900#define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
4901#define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
4902#define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
4903#define CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
4904#define CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
4905#define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
4906#define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
4907#define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
4908#define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
4909#define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
4910#define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
4911#define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
4912#define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
4913#define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
4914#define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
4915#define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
4916#define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
4917#define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
4918#define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
4919#define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
4920#define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
4921#define CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
4922#define CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
4923#define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
4924#define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
4925#define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
4926#define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
4927#define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
4928#define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
4929#define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
4930#define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
4931#define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
4932#define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
4933#define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
4934#define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
4935#define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
4936#define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
4937#define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
4938#define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
4939#define CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
4940#define CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
4941#define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
4942#define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
4943#define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
4944#define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
4945#define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
4946#define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
4947#define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
4948#define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
4949#define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
4950#define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
4951#define CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
4952#define CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
4953#define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
4954#define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
4955#define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
4956#define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
4957#define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
4958#define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
4959#define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
4960#define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
4961#define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
4962#define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
4963#define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
4964#define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
4965#define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
4966#define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
4967#define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
4968#define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
4969#define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
4970#define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
4971#define CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
4972#define CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
4973#define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
4974#define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
4975#define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
4976#define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
4977#define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
4978#define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
4979#define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
4980#define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
4981#define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
4982#define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
4983#define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
4984#define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
4985#define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
4986#define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
4987#define CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
4988#define CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
4989#define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
4990#define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
4991#define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
4992#define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
4993#define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
4994#define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
4995#define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
4996#define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
4997#define CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
4998#define CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
4999#define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
5000#define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
5001#define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
5002#define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
5003#define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
5004#define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
5005#define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
5006#define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
5007#define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
5008#define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
5009#define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
5010#define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
5011#define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
5012#define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
5013#define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
5014#define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
5015#define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
5016#define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
5017#define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
5018#define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
5019#define CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
5020#define CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
5021#define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
5022#define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
5023#define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
5024#define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
5025#define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
5026#define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
5027#define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
5028#define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
5029#define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
5030#define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
5031#define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
5032#define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
5033#define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
5034#define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
5035#define CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
5036#define CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
5037#define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
5038#define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
5039#define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
5040#define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
5041#define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
5042#define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
5043#define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
5044#define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
5045#define CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
5046#define CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
5047#define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
5048#define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
5049#define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
5050#define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
5051#define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
5052#define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
5053#define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
5054#define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
5055#define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
5056#define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
5057#define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
5058#define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
5059#define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
5060#define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
5061#define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
5062#define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
5063#define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
5064#define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
5065#define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
5066#define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
5067#define CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
5068#define CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
5069#define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
5070#define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
5071#define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
5072#define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
5073#define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
5074#define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
5075#define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
5076#define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
5077#define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
5078#define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
5079#define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
5080#define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
5081#define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
5082#define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
5083#define CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
5084#define CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
5085#define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
5086#define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
5087#define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
5088#define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
5089#define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
5090#define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
5091#define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
5092#define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
5093#define CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
5094#define CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
5095#define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
5096#define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
5097#define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
5098#define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
5099#define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
5100#define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
5101#define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
5102#define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
5103#define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
5104#define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
5105#define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
5106#define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
5107#define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
5108#define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
5109#define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
5110#define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
5111#define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
5112#define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
5113#define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
5114#define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
5115#define CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
5116#define CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
5117#define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
5118#define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
5119#define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
5120#define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
5121#define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
5122#define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
5123#define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
5124#define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
5125#define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
5126#define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
5127#define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
5128#define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
5129#define CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
5130#define CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
5131#define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
5132#define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
5133#define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
5134#define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
5135#define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
5136#define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
5137#define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
5138#define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
5139#define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
5140#define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
5141#define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
5142#define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
5143#define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
5144#define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
5145#define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
5146#define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
5147#define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
5148#define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
5149#define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
5150#define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
5151#define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
5152#define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
5153#define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
5154#define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
5155#define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
5156#define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
5157#define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
5158#define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
5159#define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
5160#define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
5161#define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
5162#define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
5163#define CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
5164#define CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
5165#define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
5166#define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
5167#define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
5168#define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
5169#define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
5170#define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
5171#define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
5172#define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
5173#define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
5174#define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
5175#define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
5176#define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
5177#define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
5178#define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
5179#define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
5180#define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
5181#define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
5182#define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
5183#define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
5184#define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
5185#define PLL_TEST_CNTL__TST_SRC_SEL_MASK 0xf
5186#define PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0
5187#define PLL_TEST_CNTL__TST_REF_SEL_MASK 0xf0
5188#define PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x4
5189#define PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x7f00
5190#define PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0x8
5191#define PLL_TEST_CNTL__TST_RESET_MASK 0x8000
5192#define PLL_TEST_CNTL__TST_RESET__SHIFT 0xf
5193#define PLL_TEST_CNTL__TEST_COUNT_MASK 0xfffe0000
5194#define PLL_TEST_CNTL__TEST_COUNT__SHIFT 0x11
5195#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_MASK 0xffff
5196#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT 0x0
5197#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT_MASK 0xf0000
5198#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT 0x10
5199#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK 0x3
5200#define CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT 0x0
5201#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT_MASK 0x3fff0
5202#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT__SHIFT 0x4
5203#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT_MASK 0x700000
5204#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT__SHIFT 0x14
5205#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK 0x3000000
5206#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT 0x18
5207#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE_MASK 0x10000000
5208#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE__SHIFT 0x1c
5209#define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION_MASK 0xffffffff
5210#define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION__SHIFT 0x0
5211#define CG_ACPI_CNTL__SCLK_ACPI_DIV_MASK 0x7f
5212#define CG_ACPI_CNTL__SCLK_ACPI_DIV__SHIFT 0x0
5213#define CG_ACPI_CNTL__SCLK_CHANGE_SKIP_MASK 0x80
5214#define CG_ACPI_CNTL__SCLK_CHANGE_SKIP__SHIFT 0x7
5215#define SCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7
5216#define SCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0
5217#define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8
5218#define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3
5219#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0
5220#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4
5221#define SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK_MASK 0x10000
5222#define SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK__SHIFT 0x10
5223#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK_MASK 0x20000
5224#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK__SHIFT 0x11
5225#define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK_MASK 0x40000
5226#define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK__SHIFT 0x12
5227#define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK_MASK 0x80000
5228#define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK__SHIFT 0x13
5229#define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK_MASK 0x100000
5230#define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK__SHIFT 0x14
5231#define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK_MASK 0x200000
5232#define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK__SHIFT 0x15
5233#define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK_MASK 0x400000
5234#define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK__SHIFT 0x16
5235#define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK_MASK 0x800000
5236#define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK__SHIFT 0x17
5237#define SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK_MASK 0x1000000
5238#define SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK__SHIFT 0x18
5239#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK_MASK 0x2000000
5240#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK__SHIFT 0x19
5241#define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE_MASK 0x4000000
5242#define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE__SHIFT 0x1a
5243#define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE_MASK 0x8000000
5244#define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE__SHIFT 0x1b
5245#define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK_MASK 0x10000000
5246#define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK__SHIFT 0x1c
5247#define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK_MASK 0x20000000
5248#define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK__SHIFT 0x1d
5249#define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK_MASK 0x40000000
5250#define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK__SHIFT 0x1e
5251#define SCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000
5252#define SCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f
5253#define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK_MASK 0x1
5254#define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK__SHIFT 0x0
5255#define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK_MASK 0x2
5256#define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK__SHIFT 0x1
5257#define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK_MASK 0x4
5258#define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK__SHIFT 0x2
5259#define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK_MASK 0x8
5260#define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK__SHIFT 0x3
5261#define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK_MASK 0x10
5262#define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK__SHIFT 0x4
5263#define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK_MASK 0x40
5264#define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK__SHIFT 0x6
5265#define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK_MASK 0x80
5266#define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK__SHIFT 0x7
5267#define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK_MASK 0x100
5268#define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK__SHIFT 0x8
5269#define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK_MASK 0x200
5270#define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK__SHIFT 0x9
5271#define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK_MASK 0x400
5272#define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK__SHIFT 0xa
5273#define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK_MASK 0x800
5274#define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK__SHIFT 0xb
5275#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK_MASK 0x1000
5276#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK__SHIFT 0xc
5277#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK_MASK 0x2000
5278#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK__SHIFT 0xd
5279#define SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x4000
5280#define SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0xe
5281#define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID_MASK 0xe00000
5282#define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID__SHIFT 0x15
5283#define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION_MASK 0xff000000
5284#define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION__SHIFT 0x18
5285#define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK_MASK 0x1
5286#define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK__SHIFT 0x0
5287#define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK_MASK 0x2
5288#define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK__SHIFT 0x1
5289#define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK_MASK 0x4
5290#define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK__SHIFT 0x2
5291#define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK_MASK 0x8
5292#define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK__SHIFT 0x3
5293#define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK_MASK 0x10
5294#define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK__SHIFT 0x4
5295#define SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK_MASK 0x20
5296#define SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK__SHIFT 0x5
5297#define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK_MASK 0x40
5298#define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK__SHIFT 0x6
5299#define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK_MASK 0x80
5300#define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK__SHIFT 0x7
5301#define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK_MASK 0x100
5302#define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK__SHIFT 0x8
5303#define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK_MASK 0x200
5304#define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK__SHIFT 0x9
5305#define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK_MASK 0x400
5306#define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK__SHIFT 0xa
5307#define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK_MASK 0x800
5308#define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK__SHIFT 0xb
5309#define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK_MASK 0x1000
5310#define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK__SHIFT 0xc
5311#define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK_MASK 0x2000
5312#define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK__SHIFT 0xd
5313#define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK_MASK 0x4000
5314#define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK__SHIFT 0xe
5315#define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK_MASK 0x8000
5316#define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK__SHIFT 0xf
5317#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID_MASK 0x7
5318#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID__SHIFT 0x0
5319#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID_MASK 0x38
5320#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID__SHIFT 0x3
5321#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE_MASK 0x10000
5322#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE__SHIFT 0x10
5323#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID_MASK 0xe0000
5324#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID__SHIFT 0x11
5325#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID_MASK 0x700000
5326#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID__SHIFT 0x14
5327#define LCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7
5328#define LCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0
5329#define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8
5330#define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3
5331#define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0
5332#define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4
5333#define LCLK_DEEP_SLEEP_CNTL__RESERVED_MASK 0x7fff0000
5334#define LCLK_DEEP_SLEEP_CNTL__RESERVED__SHIFT 0x10
5335#define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000
5336#define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f
5337#define LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK_MASK 0x1
5338#define LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK__SHIFT 0x0
5339#define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK_MASK 0x2
5340#define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK__SHIFT 0x1
5341#define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK_MASK 0x4
5342#define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK__SHIFT 0x2
5343#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3_MASK 0x8
5344#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3__SHIFT 0x3
5345#define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK_MASK 0x10
5346#define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK__SHIFT 0x4
5347#define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK_MASK 0x20
5348#define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK__SHIFT 0x5
5349#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK_MASK 0x40
5350#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK__SHIFT 0x6
5351#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK_MASK 0x80
5352#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK__SHIFT 0x7
5353#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK_MASK 0x100
5354#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK__SHIFT 0x8
5355#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK_MASK 0x200
5356#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK__SHIFT 0x9
5357#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK_MASK 0x400
5358#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK__SHIFT 0xa
5359#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK_MASK 0x800
5360#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK__SHIFT 0xb
5361#define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK_MASK 0x1000
5362#define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK__SHIFT 0xc
5363#define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK_MASK 0x2000
5364#define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK__SHIFT 0xd
5365#define LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK_MASK 0x4000
5366#define LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK__SHIFT 0xe
5367#define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK_MASK 0x8000
5368#define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK__SHIFT 0xf
5369#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK_MASK 0x10000
5370#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK__SHIFT 0x10
5371#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK_MASK 0x20000
5372#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK__SHIFT 0x11
5373#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK_MASK 0x40000
5374#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK__SHIFT 0x12
5375#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK_MASK 0x80000
5376#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK__SHIFT 0x13
5377#define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK_MASK 0x100000
5378#define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK__SHIFT 0x14
5379#define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x200000
5380#define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0x15
5381#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_MASK 0xffc00000
5382#define LCLK_DEEP_SLEEP_CNTL2__RESERVED__SHIFT 0x16
5383#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX_MASK 0xf
5384#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX__SHIFT 0x0
5385#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX_MASK 0xf0
5386#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX__SHIFT 0x4
5387#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX_MASK 0xf00
5388#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX__SHIFT 0x8
5389#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX_MASK 0xf000
5390#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX__SHIFT 0xc
5391#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX_MASK 0xf0000
5392#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX__SHIFT 0x10
5393#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX_MASK 0xf00000
5394#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX__SHIFT 0x14
5395#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0xf000000
5396#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x18
5397#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000
5398#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x1c
5399#define CG_ULV_PARAMETER__ULV_THRESHOLD_MASK 0xffff
5400#define CG_ULV_PARAMETER__ULV_THRESHOLD__SHIFT 0x0
5401#define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT_MASK 0xf0000
5402#define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT__SHIFT 0x10
5403#define SCLK_MIN_DIV__FRACV_MASK 0xfff
5404#define SCLK_MIN_DIV__FRACV__SHIFT 0x0
5405#define SCLK_MIN_DIV__INTV_MASK 0x7f000
5406#define SCLK_MIN_DIV__INTV__SHIFT 0xc
5407#define PWR_AVFS_SEL__AvfsSel_MASK 0xfffffff
5408#define PWR_AVFS_SEL__AvfsSel__SHIFT 0x0
5409#define PWR_AVFS_CNTL__MmBusIn_MASK 0xff
5410#define PWR_AVFS_CNTL__MmBusIn__SHIFT 0x0
5411#define PWR_AVFS_CNTL__MmLclRdEn_MASK 0x100
5412#define PWR_AVFS_CNTL__MmLclRdEn__SHIFT 0x8
5413#define PWR_AVFS_CNTL__MmLclWrEn_MASK 0x200
5414#define PWR_AVFS_CNTL__MmLclWrEn__SHIFT 0x9
5415#define PWR_AVFS_CNTL__MmLclSz_MASK 0xc00
5416#define PWR_AVFS_CNTL__MmLclSz__SHIFT 0xa
5417#define PWR_AVFS_CNTL__MmState_MASK 0x3f000
5418#define PWR_AVFS_CNTL__MmState__SHIFT 0xc
5419#define PWR_AVFS_CNTL__PsmScanMode_MASK 0x40000
5420#define PWR_AVFS_CNTL__PsmScanMode__SHIFT 0x12
5421#define PWR_AVFS_CNTL__PsmGater_MASK 0x80000
5422#define PWR_AVFS_CNTL__PsmGater__SHIFT 0x13
5423#define PWR_AVFS_CNTL__PsmTrst_MASK 0x100000
5424#define PWR_AVFS_CNTL__PsmTrst__SHIFT 0x14
5425#define PWR_AVFS_CNTL__PsmEn_MASK 0x200000
5426#define PWR_AVFS_CNTL__PsmEn__SHIFT 0x15
5427#define PWR_AVFS_CNTL__SkipPhaseEn_MASK 0x400000
5428#define PWR_AVFS_CNTL__SkipPhaseEn__SHIFT 0x16
5429#define PWR_AVFS_CNTL__Isolate_MASK 0x800000
5430#define PWR_AVFS_CNTL__Isolate__SHIFT 0x17
5431#define PWR_AVFS_CNTL__AvfsRst_MASK 0x1000000
5432#define PWR_AVFS_CNTL__AvfsRst__SHIFT 0x18
5433#define PWR_AVFS_CNTL__PccIsolateEn_MASK 0x2000000
5434#define PWR_AVFS_CNTL__PccIsolateEn__SHIFT 0x19
5435#define PWR_AVFS_CNTL__DeepSleepIsolateEn_MASK 0x4000000
5436#define PWR_AVFS_CNTL__DeepSleepIsolateEn__SHIFT 0x1a
5437#define PWR_AVFS0_CNTL_STATUS__MmDatOut_MASK 0xff
5438#define PWR_AVFS0_CNTL_STATUS__MmDatOut__SHIFT 0x0
5439#define PWR_AVFS0_CNTL_STATUS__PsmTdo_MASK 0x100
5440#define PWR_AVFS0_CNTL_STATUS__PsmTdo__SHIFT 0x8
5441#define PWR_AVFS0_CNTL_STATUS__AlarmFlag_MASK 0x200
5442#define PWR_AVFS0_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5443#define PWR_AVFS1_CNTL_STATUS__MmDatOut_MASK 0xff
5444#define PWR_AVFS1_CNTL_STATUS__MmDatOut__SHIFT 0x0
5445#define PWR_AVFS1_CNTL_STATUS__PsmTdo_MASK 0x100
5446#define PWR_AVFS1_CNTL_STATUS__PsmTdo__SHIFT 0x8
5447#define PWR_AVFS1_CNTL_STATUS__AlarmFlag_MASK 0x200
5448#define PWR_AVFS1_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5449#define PWR_AVFS2_CNTL_STATUS__MmDatOut_MASK 0xff
5450#define PWR_AVFS2_CNTL_STATUS__MmDatOut__SHIFT 0x0
5451#define PWR_AVFS2_CNTL_STATUS__PsmTdo_MASK 0x100
5452#define PWR_AVFS2_CNTL_STATUS__PsmTdo__SHIFT 0x8
5453#define PWR_AVFS2_CNTL_STATUS__AlarmFlag_MASK 0x200
5454#define PWR_AVFS2_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5455#define PWR_AVFS3_CNTL_STATUS__MmDatOut_MASK 0xff
5456#define PWR_AVFS3_CNTL_STATUS__MmDatOut__SHIFT 0x0
5457#define PWR_AVFS3_CNTL_STATUS__PsmTdo_MASK 0x100
5458#define PWR_AVFS3_CNTL_STATUS__PsmTdo__SHIFT 0x8
5459#define PWR_AVFS3_CNTL_STATUS__AlarmFlag_MASK 0x200
5460#define PWR_AVFS3_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5461#define PWR_AVFS4_CNTL_STATUS__MmDatOut_MASK 0xff
5462#define PWR_AVFS4_CNTL_STATUS__MmDatOut__SHIFT 0x0
5463#define PWR_AVFS4_CNTL_STATUS__PsmTdo_MASK 0x100
5464#define PWR_AVFS4_CNTL_STATUS__PsmTdo__SHIFT 0x8
5465#define PWR_AVFS4_CNTL_STATUS__AlarmFlag_MASK 0x200
5466#define PWR_AVFS4_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5467#define PWR_AVFS5_CNTL_STATUS__MmDatOut_MASK 0xff
5468#define PWR_AVFS5_CNTL_STATUS__MmDatOut__SHIFT 0x0
5469#define PWR_AVFS5_CNTL_STATUS__PsmTdo_MASK 0x100
5470#define PWR_AVFS5_CNTL_STATUS__PsmTdo__SHIFT 0x8
5471#define PWR_AVFS5_CNTL_STATUS__AlarmFlag_MASK 0x200
5472#define PWR_AVFS5_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5473#define PWR_AVFS6_CNTL_STATUS__MmDatOut_MASK 0xff
5474#define PWR_AVFS6_CNTL_STATUS__MmDatOut__SHIFT 0x0
5475#define PWR_AVFS6_CNTL_STATUS__PsmTdo_MASK 0x100
5476#define PWR_AVFS6_CNTL_STATUS__PsmTdo__SHIFT 0x8
5477#define PWR_AVFS6_CNTL_STATUS__AlarmFlag_MASK 0x200
5478#define PWR_AVFS6_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5479#define PWR_AVFS7_CNTL_STATUS__MmDatOut_MASK 0xff
5480#define PWR_AVFS7_CNTL_STATUS__MmDatOut__SHIFT 0x0
5481#define PWR_AVFS7_CNTL_STATUS__PsmTdo_MASK 0x100
5482#define PWR_AVFS7_CNTL_STATUS__PsmTdo__SHIFT 0x8
5483#define PWR_AVFS7_CNTL_STATUS__AlarmFlag_MASK 0x200
5484#define PWR_AVFS7_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5485#define PWR_AVFS8_CNTL_STATUS__MmDatOut_MASK 0xff
5486#define PWR_AVFS8_CNTL_STATUS__MmDatOut__SHIFT 0x0
5487#define PWR_AVFS8_CNTL_STATUS__PsmTdo_MASK 0x100
5488#define PWR_AVFS8_CNTL_STATUS__PsmTdo__SHIFT 0x8
5489#define PWR_AVFS8_CNTL_STATUS__AlarmFlag_MASK 0x200
5490#define PWR_AVFS8_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5491#define PWR_AVFS9_CNTL_STATUS__MmDatOut_MASK 0xff
5492#define PWR_AVFS9_CNTL_STATUS__MmDatOut__SHIFT 0x0
5493#define PWR_AVFS9_CNTL_STATUS__PsmTdo_MASK 0x100
5494#define PWR_AVFS9_CNTL_STATUS__PsmTdo__SHIFT 0x8
5495#define PWR_AVFS9_CNTL_STATUS__AlarmFlag_MASK 0x200
5496#define PWR_AVFS9_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5497#define PWR_AVFS10_CNTL_STATUS__MmDatOut_MASK 0xff
5498#define PWR_AVFS10_CNTL_STATUS__MmDatOut__SHIFT 0x0
5499#define PWR_AVFS10_CNTL_STATUS__PsmTdo_MASK 0x100
5500#define PWR_AVFS10_CNTL_STATUS__PsmTdo__SHIFT 0x8
5501#define PWR_AVFS10_CNTL_STATUS__AlarmFlag_MASK 0x200
5502#define PWR_AVFS10_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5503#define PWR_AVFS11_CNTL_STATUS__MmDatOut_MASK 0xff
5504#define PWR_AVFS11_CNTL_STATUS__MmDatOut__SHIFT 0x0
5505#define PWR_AVFS11_CNTL_STATUS__PsmTdo_MASK 0x100
5506#define PWR_AVFS11_CNTL_STATUS__PsmTdo__SHIFT 0x8
5507#define PWR_AVFS11_CNTL_STATUS__AlarmFlag_MASK 0x200
5508#define PWR_AVFS11_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5509#define PWR_AVFS12_CNTL_STATUS__MmDatOut_MASK 0xff
5510#define PWR_AVFS12_CNTL_STATUS__MmDatOut__SHIFT 0x0
5511#define PWR_AVFS12_CNTL_STATUS__PsmTdo_MASK 0x100
5512#define PWR_AVFS12_CNTL_STATUS__PsmTdo__SHIFT 0x8
5513#define PWR_AVFS12_CNTL_STATUS__AlarmFlag_MASK 0x200
5514#define PWR_AVFS12_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5515#define PWR_AVFS13_CNTL_STATUS__MmDatOut_MASK 0xff
5516#define PWR_AVFS13_CNTL_STATUS__MmDatOut__SHIFT 0x0
5517#define PWR_AVFS13_CNTL_STATUS__PsmTdo_MASK 0x100
5518#define PWR_AVFS13_CNTL_STATUS__PsmTdo__SHIFT 0x8
5519#define PWR_AVFS13_CNTL_STATUS__AlarmFlag_MASK 0x200
5520#define PWR_AVFS13_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5521#define PWR_AVFS14_CNTL_STATUS__MmDatOut_MASK 0xff
5522#define PWR_AVFS14_CNTL_STATUS__MmDatOut__SHIFT 0x0
5523#define PWR_AVFS14_CNTL_STATUS__PsmTdo_MASK 0x100
5524#define PWR_AVFS14_CNTL_STATUS__PsmTdo__SHIFT 0x8
5525#define PWR_AVFS14_CNTL_STATUS__AlarmFlag_MASK 0x200
5526#define PWR_AVFS14_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5527#define PWR_AVFS15_CNTL_STATUS__MmDatOut_MASK 0xff
5528#define PWR_AVFS15_CNTL_STATUS__MmDatOut__SHIFT 0x0
5529#define PWR_AVFS15_CNTL_STATUS__PsmTdo_MASK 0x100
5530#define PWR_AVFS15_CNTL_STATUS__PsmTdo__SHIFT 0x8
5531#define PWR_AVFS15_CNTL_STATUS__AlarmFlag_MASK 0x200
5532#define PWR_AVFS15_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5533#define PWR_AVFS16_CNTL_STATUS__MmDatOut_MASK 0xff
5534#define PWR_AVFS16_CNTL_STATUS__MmDatOut__SHIFT 0x0
5535#define PWR_AVFS16_CNTL_STATUS__PsmTdo_MASK 0x100
5536#define PWR_AVFS16_CNTL_STATUS__PsmTdo__SHIFT 0x8
5537#define PWR_AVFS16_CNTL_STATUS__AlarmFlag_MASK 0x200
5538#define PWR_AVFS16_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5539#define PWR_AVFS17_CNTL_STATUS__MmDatOut_MASK 0xff
5540#define PWR_AVFS17_CNTL_STATUS__MmDatOut__SHIFT 0x0
5541#define PWR_AVFS17_CNTL_STATUS__PsmTdo_MASK 0x100
5542#define PWR_AVFS17_CNTL_STATUS__PsmTdo__SHIFT 0x8
5543#define PWR_AVFS17_CNTL_STATUS__AlarmFlag_MASK 0x200
5544#define PWR_AVFS17_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5545#define PWR_AVFS18_CNTL_STATUS__MmDatOut_MASK 0xff
5546#define PWR_AVFS18_CNTL_STATUS__MmDatOut__SHIFT 0x0
5547#define PWR_AVFS18_CNTL_STATUS__PsmTdo_MASK 0x100
5548#define PWR_AVFS18_CNTL_STATUS__PsmTdo__SHIFT 0x8
5549#define PWR_AVFS18_CNTL_STATUS__AlarmFlag_MASK 0x200
5550#define PWR_AVFS18_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5551#define PWR_AVFS19_CNTL_STATUS__MmDatOut_MASK 0xff
5552#define PWR_AVFS19_CNTL_STATUS__MmDatOut__SHIFT 0x0
5553#define PWR_AVFS19_CNTL_STATUS__PsmTdo_MASK 0x100
5554#define PWR_AVFS19_CNTL_STATUS__PsmTdo__SHIFT 0x8
5555#define PWR_AVFS19_CNTL_STATUS__AlarmFlag_MASK 0x200
5556#define PWR_AVFS19_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5557#define PWR_AVFS20_CNTL_STATUS__MmDatOut_MASK 0xff
5558#define PWR_AVFS20_CNTL_STATUS__MmDatOut__SHIFT 0x0
5559#define PWR_AVFS20_CNTL_STATUS__PsmTdo_MASK 0x100
5560#define PWR_AVFS20_CNTL_STATUS__PsmTdo__SHIFT 0x8
5561#define PWR_AVFS20_CNTL_STATUS__AlarmFlag_MASK 0x200
5562#define PWR_AVFS20_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5563#define PWR_AVFS21_CNTL_STATUS__MmDatOut_MASK 0xff
5564#define PWR_AVFS21_CNTL_STATUS__MmDatOut__SHIFT 0x0
5565#define PWR_AVFS21_CNTL_STATUS__PsmTdo_MASK 0x100
5566#define PWR_AVFS21_CNTL_STATUS__PsmTdo__SHIFT 0x8
5567#define PWR_AVFS21_CNTL_STATUS__AlarmFlag_MASK 0x200
5568#define PWR_AVFS21_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5569#define PWR_AVFS22_CNTL_STATUS__MmDatOut_MASK 0xff
5570#define PWR_AVFS22_CNTL_STATUS__MmDatOut__SHIFT 0x0
5571#define PWR_AVFS22_CNTL_STATUS__PsmTdo_MASK 0x100
5572#define PWR_AVFS22_CNTL_STATUS__PsmTdo__SHIFT 0x8
5573#define PWR_AVFS22_CNTL_STATUS__AlarmFlag_MASK 0x200
5574#define PWR_AVFS22_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5575#define PWR_AVFS23_CNTL_STATUS__MmDatOut_MASK 0xff
5576#define PWR_AVFS23_CNTL_STATUS__MmDatOut__SHIFT 0x0
5577#define PWR_AVFS23_CNTL_STATUS__PsmTdo_MASK 0x100
5578#define PWR_AVFS23_CNTL_STATUS__PsmTdo__SHIFT 0x8
5579#define PWR_AVFS23_CNTL_STATUS__AlarmFlag_MASK 0x200
5580#define PWR_AVFS23_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5581#define PWR_AVFS24_CNTL_STATUS__MmDatOut_MASK 0xff
5582#define PWR_AVFS24_CNTL_STATUS__MmDatOut__SHIFT 0x0
5583#define PWR_AVFS24_CNTL_STATUS__PsmTdo_MASK 0x100
5584#define PWR_AVFS24_CNTL_STATUS__PsmTdo__SHIFT 0x8
5585#define PWR_AVFS24_CNTL_STATUS__AlarmFlag_MASK 0x200
5586#define PWR_AVFS24_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5587#define PWR_AVFS25_CNTL_STATUS__MmDatOut_MASK 0xff
5588#define PWR_AVFS25_CNTL_STATUS__MmDatOut__SHIFT 0x0
5589#define PWR_AVFS25_CNTL_STATUS__PsmTdo_MASK 0x100
5590#define PWR_AVFS25_CNTL_STATUS__PsmTdo__SHIFT 0x8
5591#define PWR_AVFS25_CNTL_STATUS__AlarmFlag_MASK 0x200
5592#define PWR_AVFS25_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5593#define PWR_AVFS26_CNTL_STATUS__MmDatOut_MASK 0xff
5594#define PWR_AVFS26_CNTL_STATUS__MmDatOut__SHIFT 0x0
5595#define PWR_AVFS26_CNTL_STATUS__PsmTdo_MASK 0x100
5596#define PWR_AVFS26_CNTL_STATUS__PsmTdo__SHIFT 0x8
5597#define PWR_AVFS26_CNTL_STATUS__AlarmFlag_MASK 0x200
5598#define PWR_AVFS26_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5599#define PWR_AVFS27_CNTL_STATUS__MmDatOut_MASK 0xff
5600#define PWR_AVFS27_CNTL_STATUS__MmDatOut__SHIFT 0x0
5601#define PWR_AVFS27_CNTL_STATUS__PsmTdo_MASK 0x100
5602#define PWR_AVFS27_CNTL_STATUS__PsmTdo__SHIFT 0x8
5603#define PWR_AVFS27_CNTL_STATUS__AlarmFlag_MASK 0x200
5604#define PWR_AVFS27_CNTL_STATUS__AlarmFlag__SHIFT 0x9
5605#define PWR_CKS_ENABLE__STRETCH_ENABLE_MASK 0x1
5606#define PWR_CKS_ENABLE__STRETCH_ENABLE__SHIFT 0x0
5607#define PWR_CKS_ENABLE__masterReset_MASK 0x2
5608#define PWR_CKS_ENABLE__masterReset__SHIFT 0x1
5609#define PWR_CKS_ENABLE__staticEnable_MASK 0x4
5610#define PWR_CKS_ENABLE__staticEnable__SHIFT 0x2
5611#define PWR_CKS_ENABLE__IGNORE_DROOP_DETECT_MASK 0x8
5612#define PWR_CKS_ENABLE__IGNORE_DROOP_DETECT__SHIFT 0x3
5613#define PWR_CKS_ENABLE__PCC_HAND_SHAKE_EN_MASK 0x10
5614#define PWR_CKS_ENABLE__PCC_HAND_SHAKE_EN__SHIFT 0x4
5615#define PWR_CKS_ENABLE__MET_CTRL_SEL_MASK 0x60
5616#define PWR_CKS_ENABLE__MET_CTRL_SEL__SHIFT 0x5
5617#define PWR_CKS_ENABLE__DS_HAND_SHAKE_EN_MASK 0x80
5618#define PWR_CKS_ENABLE__DS_HAND_SHAKE_EN__SHIFT 0x7
5619#define PWR_CKS_CNTL__CKS_BYPASS_MASK 0x1
5620#define PWR_CKS_CNTL__CKS_BYPASS__SHIFT 0x0
5621#define PWR_CKS_CNTL__CKS_PCCEnable_MASK 0x2
5622#define PWR_CKS_CNTL__CKS_PCCEnable__SHIFT 0x1
5623#define PWR_CKS_CNTL__CKS_TEMP_COMP_MASK 0x4
5624#define PWR_CKS_CNTL__CKS_TEMP_COMP__SHIFT 0x2
5625#define PWR_CKS_CNTL__CKS_STRETCH_AMOUNT_MASK 0x78
5626#define PWR_CKS_CNTL__CKS_STRETCH_AMOUNT__SHIFT 0x3
5627#define PWR_CKS_CNTL__CKS_SKIP_PHASE_BYPASS_MASK 0x80
5628#define PWR_CKS_CNTL__CKS_SKIP_PHASE_BYPASS__SHIFT 0x7
5629#define PWR_CKS_CNTL__CKS_SAMPLE_SIZE_MASK 0xf00
5630#define PWR_CKS_CNTL__CKS_SAMPLE_SIZE__SHIFT 0x8
5631#define PWR_CKS_CNTL__CKS_FSM_WAIT_CYCLES_MASK 0xf000
5632#define PWR_CKS_CNTL__CKS_FSM_WAIT_CYCLES__SHIFT 0xc
5633#define PWR_CKS_CNTL__CKS_USE_FOR_LOW_FREQ_MASK 0x10000
5634#define PWR_CKS_CNTL__CKS_USE_FOR_LOW_FREQ__SHIFT 0x10
5635#define PWR_CKS_CNTL__CKS_NO_EXTRA_COARSE_STEP_MASK 0x20000
5636#define PWR_CKS_CNTL__CKS_NO_EXTRA_COARSE_STEP__SHIFT 0x11
5637#define PWR_CKS_CNTL__CKS_LDO_REFSEL_MASK 0x3c0000
5638#define PWR_CKS_CNTL__CKS_LDO_REFSEL__SHIFT 0x12
5639#define PWR_CKS_CNTL__DDT_DEBUS_SEL_MASK 0x400000
5640#define PWR_CKS_CNTL__DDT_DEBUS_SEL__SHIFT 0x16
5641#define PWR_CKS_CNTL__CKS_LDO_READY_COUNT_VAL_MASK 0x7f800000
5642#define PWR_CKS_CNTL__CKS_LDO_READY_COUNT_VAL__SHIFT 0x17
5643#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x1ffffff
5644#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
5645#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x2000000
5646#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
5647#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x4000000
5648#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
5649#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x8000000
5650#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
5651#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000
5652#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
5653#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000
5654#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
5655#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000
5656#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
5657#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x1
5658#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
5659#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT_MASK 0x2
5660#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
5661#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_MASK 0x4
5662#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT__SHIFT 0x2
5663#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xffffff80
5664#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
5665#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x1ffffff
5666#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
5667#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x2000000
5668#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
5669#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x4000000
5670#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
5671#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x8000000
5672#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
5673#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000
5674#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
5675#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000
5676#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
5677#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000
5678#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
5679#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x1
5680#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
5681#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT_MASK 0x2
5682#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
5683#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_MASK 0x4
5684#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT__SHIFT 0x2
5685#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xffffff80
5686#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
5687#define PWR_DISP_TIMER_CONTROL2__DISP_TIMER_PULSE_WIDTH_MASK 0x3ff
5688#define PWR_DISP_TIMER_CONTROL2__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0
5689#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_MASK 0xffff
5690#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD__SHIFT 0x0
5691#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_UNIT_MASK 0xf0000
5692#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_UNIT__SHIFT 0x10
5693#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_EN_MASK 0x1
5694#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_EN__SHIFT 0x0
5695#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_DETECT_MASK 0x2
5696#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_DETECT__SHIFT 0x1
5697#define VDDGFX_IDLE_CONTROL__FORCE_VDDGFX_IDLE_EXIT_MASK 0x4
5698#define VDDGFX_IDLE_CONTROL__FORCE_VDDGFX_IDLE_EXIT__SHIFT 0x2
5699#define VDDGFX_IDLE_CONTROL__SMC_VDDGFX_IDLE_STATE_MASK 0x8
5700#define VDDGFX_IDLE_CONTROL__SMC_VDDGFX_IDLE_STATE__SHIFT 0x3
5701#define VDDGFX_IDLE_EXIT__BIF_EXIT_REQ_MASK 0x1
5702#define VDDGFX_IDLE_EXIT__BIF_EXIT_REQ__SHIFT 0x0
5703#define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x1
5704#define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x0
5705#define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x1fffe
5706#define LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT 0x1
5707#define LCAC_MC0_CNTL__MC0_BLOCK_ID_MASK 0x3e0000
5708#define LCAC_MC0_CNTL__MC0_BLOCK_ID__SHIFT 0x11
5709#define LCAC_MC0_CNTL__MC0_SIGNAL_ID_MASK 0x3fc00000
5710#define LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT 0x16
5711#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL_MASK 0xffffffff
5712#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL__SHIFT 0x0
5713#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL_MASK 0xffffffff
5714#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL__SHIFT 0x0
5715#define LCAC_MC1_CNTL__MC1_ENABLE_MASK 0x1
5716#define LCAC_MC1_CNTL__MC1_ENABLE__SHIFT 0x0
5717#define LCAC_MC1_CNTL__MC1_THRESHOLD_MASK 0x1fffe
5718#define LCAC_MC1_CNTL__MC1_THRESHOLD__SHIFT 0x1
5719#define LCAC_MC1_CNTL__MC1_BLOCK_ID_MASK 0x3e0000
5720#define LCAC_MC1_CNTL__MC1_BLOCK_ID__SHIFT 0x11
5721#define LCAC_MC1_CNTL__MC1_SIGNAL_ID_MASK 0x3fc00000
5722#define LCAC_MC1_CNTL__MC1_SIGNAL_ID__SHIFT 0x16
5723#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL_MASK 0xffffffff
5724#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL__SHIFT 0x0
5725#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL_MASK 0xffffffff
5726#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL__SHIFT 0x0
5727#define LCAC_MC2_CNTL__MC2_ENABLE_MASK 0x1
5728#define LCAC_MC2_CNTL__MC2_ENABLE__SHIFT 0x0
5729#define LCAC_MC2_CNTL__MC2_THRESHOLD_MASK 0x1fffe
5730#define LCAC_MC2_CNTL__MC2_THRESHOLD__SHIFT 0x1
5731#define LCAC_MC2_CNTL__MC2_BLOCK_ID_MASK 0x3e0000
5732#define LCAC_MC2_CNTL__MC2_BLOCK_ID__SHIFT 0x11
5733#define LCAC_MC2_CNTL__MC2_SIGNAL_ID_MASK 0x3fc00000
5734#define LCAC_MC2_CNTL__MC2_SIGNAL_ID__SHIFT 0x16
5735#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL_MASK 0xffffffff
5736#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL__SHIFT 0x0
5737#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL_MASK 0xffffffff
5738#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL__SHIFT 0x0
5739#define LCAC_MC3_CNTL__MC3_ENABLE_MASK 0x1
5740#define LCAC_MC3_CNTL__MC3_ENABLE__SHIFT 0x0
5741#define LCAC_MC3_CNTL__MC3_THRESHOLD_MASK 0x1fffe
5742#define LCAC_MC3_CNTL__MC3_THRESHOLD__SHIFT 0x1
5743#define LCAC_MC3_CNTL__MC3_BLOCK_ID_MASK 0x3e0000
5744#define LCAC_MC3_CNTL__MC3_BLOCK_ID__SHIFT 0x11
5745#define LCAC_MC3_CNTL__MC3_SIGNAL_ID_MASK 0x3fc00000
5746#define LCAC_MC3_CNTL__MC3_SIGNAL_ID__SHIFT 0x16
5747#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL_MASK 0xffffffff
5748#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL__SHIFT 0x0
5749#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL_MASK 0xffffffff
5750#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL__SHIFT 0x0
5751#define LCAC_MC4_CNTL__MC4_ENABLE_MASK 0x1
5752#define LCAC_MC4_CNTL__MC4_ENABLE__SHIFT 0x0
5753#define LCAC_MC4_CNTL__MC4_THRESHOLD_MASK 0x1fffe
5754#define LCAC_MC4_CNTL__MC4_THRESHOLD__SHIFT 0x1
5755#define LCAC_MC4_CNTL__MC4_BLOCK_ID_MASK 0x3e0000
5756#define LCAC_MC4_CNTL__MC4_BLOCK_ID__SHIFT 0x11
5757#define LCAC_MC4_CNTL__MC4_SIGNAL_ID_MASK 0x3fc00000
5758#define LCAC_MC4_CNTL__MC4_SIGNAL_ID__SHIFT 0x16
5759#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL_MASK 0xffffffff
5760#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL__SHIFT 0x0
5761#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL_MASK 0xffffffff
5762#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL__SHIFT 0x0
5763#define LCAC_MC5_CNTL__MC5_ENABLE_MASK 0x1
5764#define LCAC_MC5_CNTL__MC5_ENABLE__SHIFT 0x0
5765#define LCAC_MC5_CNTL__MC5_THRESHOLD_MASK 0x1fffe
5766#define LCAC_MC5_CNTL__MC5_THRESHOLD__SHIFT 0x1
5767#define LCAC_MC5_CNTL__MC5_BLOCK_ID_MASK 0x3e0000
5768#define LCAC_MC5_CNTL__MC5_BLOCK_ID__SHIFT 0x11
5769#define LCAC_MC5_CNTL__MC5_SIGNAL_ID_MASK 0x3fc00000
5770#define LCAC_MC5_CNTL__MC5_SIGNAL_ID__SHIFT 0x16
5771#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL_MASK 0xffffffff
5772#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x0
5773#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffff
5774#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x0
5775#define LCAC_MC6_CNTL__MC6_ENABLE_MASK 0x1
5776#define LCAC_MC6_CNTL__MC6_ENABLE__SHIFT 0x0
5777#define LCAC_MC6_CNTL__MC6_THRESHOLD_MASK 0x1fffe
5778#define LCAC_MC6_CNTL__MC6_THRESHOLD__SHIFT 0x1
5779#define LCAC_MC6_CNTL__MC6_BLOCK_ID_MASK 0x3e0000
5780#define LCAC_MC6_CNTL__MC6_BLOCK_ID__SHIFT 0x11
5781#define LCAC_MC6_CNTL__MC6_SIGNAL_ID_MASK 0x3fc00000
5782#define LCAC_MC6_CNTL__MC6_SIGNAL_ID__SHIFT 0x16
5783#define LCAC_MC6_OVR_SEL__MC6_OVR_SEL_MASK 0xffffffff
5784#define LCAC_MC6_OVR_SEL__MC6_OVR_SEL__SHIFT 0x0
5785#define LCAC_MC6_OVR_VAL__MC6_OVR_VAL_MASK 0xffffffff
5786#define LCAC_MC6_OVR_VAL__MC6_OVR_VAL__SHIFT 0x0
5787#define LCAC_MC7_CNTL__MC7_ENABLE_MASK 0x1
5788#define LCAC_MC7_CNTL__MC7_ENABLE__SHIFT 0x0
5789#define LCAC_MC7_CNTL__MC7_THRESHOLD_MASK 0x1fffe
5790#define LCAC_MC7_CNTL__MC7_THRESHOLD__SHIFT 0x1
5791#define LCAC_MC7_CNTL__MC7_BLOCK_ID_MASK 0x3e0000
5792#define LCAC_MC7_CNTL__MC7_BLOCK_ID__SHIFT 0x11
5793#define LCAC_MC7_CNTL__MC7_SIGNAL_ID_MASK 0x3fc00000
5794#define LCAC_MC7_CNTL__MC7_SIGNAL_ID__SHIFT 0x16
5795#define LCAC_MC7_OVR_SEL__MC7_OVR_SEL_MASK 0xffffffff
5796#define LCAC_MC7_OVR_SEL__MC7_OVR_SEL__SHIFT 0x0
5797#define LCAC_MC7_OVR_VAL__MC7_OVR_VAL_MASK 0xffffffff
5798#define LCAC_MC7_OVR_VAL__MC7_OVR_VAL__SHIFT 0x0
5799#define LCAC_CPL_CNTL__CPL_ENABLE_MASK 0x1
5800#define LCAC_CPL_CNTL__CPL_ENABLE__SHIFT 0x0
5801#define LCAC_CPL_CNTL__CPL_THRESHOLD_MASK 0x1fffe
5802#define LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT 0x1
5803#define LCAC_CPL_CNTL__CPL_BLOCK_ID_MASK 0x3e0000
5804#define LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT 0x11
5805#define LCAC_CPL_CNTL__CPL_SIGNAL_ID_MASK 0x3fc00000
5806#define LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT 0x16
5807#define LCAC_CPL_OVR_SEL__CPL_OVR_SEL_MASK 0xffffffff
5808#define LCAC_CPL_OVR_SEL__CPL_OVR_SEL__SHIFT 0x0
5809#define LCAC_CPL_OVR_VAL__CPL_OVR_VAL_MASK 0xffffffff
5810#define LCAC_CPL_OVR_VAL__CPL_OVR_VAL__SHIFT 0x0
5811#define ROM_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
5812#define ROM_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
5813#define ROM_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
5814#define ROM_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
5815#define ROM_CNTL__SCK_OVERWRITE_MASK 0x2
5816#define ROM_CNTL__SCK_OVERWRITE__SHIFT 0x1
5817#define ROM_CNTL__CLOCK_GATING_EN_MASK 0x4
5818#define ROM_CNTL__CLOCK_GATING_EN__SHIFT 0x2
5819#define ROM_CNTL__CSB_ACTIVE_TO_SCK_SETUP_TIME_MASK 0xff00
5820#define ROM_CNTL__CSB_ACTIVE_TO_SCK_SETUP_TIME__SHIFT 0x8
5821#define ROM_CNTL__CSB_ACTIVE_TO_SCK_HOLD_TIME_MASK 0xff0000
5822#define ROM_CNTL__CSB_ACTIVE_TO_SCK_HOLD_TIME__SHIFT 0x10
5823#define ROM_CNTL__SCK_PRESCALE_REFCLK_MASK 0xf000000
5824#define ROM_CNTL__SCK_PRESCALE_REFCLK__SHIFT 0x18
5825#define ROM_CNTL__SCK_PRESCALE_CRYSTAL_CLK_MASK 0xf0000000
5826#define ROM_CNTL__SCK_PRESCALE_CRYSTAL_CLK__SHIFT 0x1c
5827#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR_MASK 0xffffff
5828#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR__SHIFT 0x0
5829#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE_MASK 0x1000000
5830#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE__SHIFT 0x18
5831#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE_MASK 0x2000000
5832#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE__SHIFT 0x19
5833#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE_MASK 0xc000000
5834#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE__SHIFT 0x1a
5835#define ROM_STATUS__ROM_BUSY_MASK 0x1
5836#define ROM_STATUS__ROM_BUSY__SHIFT 0x0
5837#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK 0xf
5838#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT 0x0
5839#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0xff0
5840#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
5841#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000
5842#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
5843#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000
5844#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
5845#define ROM_INDEX__ROM_INDEX_MASK 0xffffff
5846#define ROM_INDEX__ROM_INDEX__SHIFT 0x0
5847#define ROM_DATA__ROM_DATA_MASK 0xffffffff
5848#define ROM_DATA__ROM_DATA__SHIFT 0x0
5849#define ROM_START__ROM_START_MASK 0xffffff
5850#define ROM_START__ROM_START__SHIFT 0x0
5851#define ROM_SW_CNTL__DATA_SIZE_MASK 0xffff
5852#define ROM_SW_CNTL__DATA_SIZE__SHIFT 0x0
5853#define ROM_SW_CNTL__COMMAND_SIZE_MASK 0x30000
5854#define ROM_SW_CNTL__COMMAND_SIZE__SHIFT 0x10
5855#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE_MASK 0x40000
5856#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE__SHIFT 0x12
5857#define ROM_SW_STATUS__ROM_SW_DONE_MASK 0x1
5858#define ROM_SW_STATUS__ROM_SW_DONE__SHIFT 0x0
5859#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION_MASK 0xff
5860#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION__SHIFT 0x0
5861#define ROM_SW_COMMAND__ROM_SW_ADDRESS_MASK 0xffffff00
5862#define ROM_SW_COMMAND__ROM_SW_ADDRESS__SHIFT 0x8
5863#define ROM_SW_DATA_1__ROM_SW_DATA_MASK 0xffffffff
5864#define ROM_SW_DATA_1__ROM_SW_DATA__SHIFT 0x0
5865#define ROM_SW_DATA_2__ROM_SW_DATA_MASK 0xffffffff
5866#define ROM_SW_DATA_2__ROM_SW_DATA__SHIFT 0x0
5867#define ROM_SW_DATA_3__ROM_SW_DATA_MASK 0xffffffff
5868#define ROM_SW_DATA_3__ROM_SW_DATA__SHIFT 0x0
5869#define ROM_SW_DATA_4__ROM_SW_DATA_MASK 0xffffffff
5870#define ROM_SW_DATA_4__ROM_SW_DATA__SHIFT 0x0
5871#define ROM_SW_DATA_5__ROM_SW_DATA_MASK 0xffffffff
5872#define ROM_SW_DATA_5__ROM_SW_DATA__SHIFT 0x0
5873#define ROM_SW_DATA_6__ROM_SW_DATA_MASK 0xffffffff
5874#define ROM_SW_DATA_6__ROM_SW_DATA__SHIFT 0x0
5875#define ROM_SW_DATA_7__ROM_SW_DATA_MASK 0xffffffff
5876#define ROM_SW_DATA_7__ROM_SW_DATA__SHIFT 0x0
5877#define ROM_SW_DATA_8__ROM_SW_DATA_MASK 0xffffffff
5878#define ROM_SW_DATA_8__ROM_SW_DATA__SHIFT 0x0
5879#define ROM_SW_DATA_9__ROM_SW_DATA_MASK 0xffffffff
5880#define ROM_SW_DATA_9__ROM_SW_DATA__SHIFT 0x0
5881#define ROM_SW_DATA_10__ROM_SW_DATA_MASK 0xffffffff
5882#define ROM_SW_DATA_10__ROM_SW_DATA__SHIFT 0x0
5883#define ROM_SW_DATA_11__ROM_SW_DATA_MASK 0xffffffff
5884#define ROM_SW_DATA_11__ROM_SW_DATA__SHIFT 0x0
5885#define ROM_SW_DATA_12__ROM_SW_DATA_MASK 0xffffffff
5886#define ROM_SW_DATA_12__ROM_SW_DATA__SHIFT 0x0
5887#define ROM_SW_DATA_13__ROM_SW_DATA_MASK 0xffffffff
5888#define ROM_SW_DATA_13__ROM_SW_DATA__SHIFT 0x0
5889#define ROM_SW_DATA_14__ROM_SW_DATA_MASK 0xffffffff
5890#define ROM_SW_DATA_14__ROM_SW_DATA__SHIFT 0x0
5891#define ROM_SW_DATA_15__ROM_SW_DATA_MASK 0xffffffff
5892#define ROM_SW_DATA_15__ROM_SW_DATA__SHIFT 0x0
5893#define ROM_SW_DATA_16__ROM_SW_DATA_MASK 0xffffffff
5894#define ROM_SW_DATA_16__ROM_SW_DATA__SHIFT 0x0
5895#define ROM_SW_DATA_17__ROM_SW_DATA_MASK 0xffffffff
5896#define ROM_SW_DATA_17__ROM_SW_DATA__SHIFT 0x0
5897#define ROM_SW_DATA_18__ROM_SW_DATA_MASK 0xffffffff
5898#define ROM_SW_DATA_18__ROM_SW_DATA__SHIFT 0x0
5899#define ROM_SW_DATA_19__ROM_SW_DATA_MASK 0xffffffff
5900#define ROM_SW_DATA_19__ROM_SW_DATA__SHIFT 0x0
5901#define ROM_SW_DATA_20__ROM_SW_DATA_MASK 0xffffffff
5902#define ROM_SW_DATA_20__ROM_SW_DATA__SHIFT 0x0
5903#define ROM_SW_DATA_21__ROM_SW_DATA_MASK 0xffffffff
5904#define ROM_SW_DATA_21__ROM_SW_DATA__SHIFT 0x0
5905#define ROM_SW_DATA_22__ROM_SW_DATA_MASK 0xffffffff
5906#define ROM_SW_DATA_22__ROM_SW_DATA__SHIFT 0x0
5907#define ROM_SW_DATA_23__ROM_SW_DATA_MASK 0xffffffff
5908#define ROM_SW_DATA_23__ROM_SW_DATA__SHIFT 0x0
5909#define ROM_SW_DATA_24__ROM_SW_DATA_MASK 0xffffffff
5910#define ROM_SW_DATA_24__ROM_SW_DATA__SHIFT 0x0
5911#define ROM_SW_DATA_25__ROM_SW_DATA_MASK 0xffffffff
5912#define ROM_SW_DATA_25__ROM_SW_DATA__SHIFT 0x0
5913#define ROM_SW_DATA_26__ROM_SW_DATA_MASK 0xffffffff
5914#define ROM_SW_DATA_26__ROM_SW_DATA__SHIFT 0x0
5915#define ROM_SW_DATA_27__ROM_SW_DATA_MASK 0xffffffff
5916#define ROM_SW_DATA_27__ROM_SW_DATA__SHIFT 0x0
5917#define ROM_SW_DATA_28__ROM_SW_DATA_MASK 0xffffffff
5918#define ROM_SW_DATA_28__ROM_SW_DATA__SHIFT 0x0
5919#define ROM_SW_DATA_29__ROM_SW_DATA_MASK 0xffffffff
5920#define ROM_SW_DATA_29__ROM_SW_DATA__SHIFT 0x0
5921#define ROM_SW_DATA_30__ROM_SW_DATA_MASK 0xffffffff
5922#define ROM_SW_DATA_30__ROM_SW_DATA__SHIFT 0x0
5923#define ROM_SW_DATA_31__ROM_SW_DATA_MASK 0xffffffff
5924#define ROM_SW_DATA_31__ROM_SW_DATA__SHIFT 0x0
5925#define ROM_SW_DATA_32__ROM_SW_DATA_MASK 0xffffffff
5926#define ROM_SW_DATA_32__ROM_SW_DATA__SHIFT 0x0
5927#define ROM_SW_DATA_33__ROM_SW_DATA_MASK 0xffffffff
5928#define ROM_SW_DATA_33__ROM_SW_DATA__SHIFT 0x0
5929#define ROM_SW_DATA_34__ROM_SW_DATA_MASK 0xffffffff
5930#define ROM_SW_DATA_34__ROM_SW_DATA__SHIFT 0x0
5931#define ROM_SW_DATA_35__ROM_SW_DATA_MASK 0xffffffff
5932#define ROM_SW_DATA_35__ROM_SW_DATA__SHIFT 0x0
5933#define ROM_SW_DATA_36__ROM_SW_DATA_MASK 0xffffffff
5934#define ROM_SW_DATA_36__ROM_SW_DATA__SHIFT 0x0
5935#define ROM_SW_DATA_37__ROM_SW_DATA_MASK 0xffffffff
5936#define ROM_SW_DATA_37__ROM_SW_DATA__SHIFT 0x0
5937#define ROM_SW_DATA_38__ROM_SW_DATA_MASK 0xffffffff
5938#define ROM_SW_DATA_38__ROM_SW_DATA__SHIFT 0x0
5939#define ROM_SW_DATA_39__ROM_SW_DATA_MASK 0xffffffff
5940#define ROM_SW_DATA_39__ROM_SW_DATA__SHIFT 0x0
5941#define ROM_SW_DATA_40__ROM_SW_DATA_MASK 0xffffffff
5942#define ROM_SW_DATA_40__ROM_SW_DATA__SHIFT 0x0
5943#define ROM_SW_DATA_41__ROM_SW_DATA_MASK 0xffffffff
5944#define ROM_SW_DATA_41__ROM_SW_DATA__SHIFT 0x0
5945#define ROM_SW_DATA_42__ROM_SW_DATA_MASK 0xffffffff
5946#define ROM_SW_DATA_42__ROM_SW_DATA__SHIFT 0x0
5947#define ROM_SW_DATA_43__ROM_SW_DATA_MASK 0xffffffff
5948#define ROM_SW_DATA_43__ROM_SW_DATA__SHIFT 0x0
5949#define ROM_SW_DATA_44__ROM_SW_DATA_MASK 0xffffffff
5950#define ROM_SW_DATA_44__ROM_SW_DATA__SHIFT 0x0
5951#define ROM_SW_DATA_45__ROM_SW_DATA_MASK 0xffffffff
5952#define ROM_SW_DATA_45__ROM_SW_DATA__SHIFT 0x0
5953#define ROM_SW_DATA_46__ROM_SW_DATA_MASK 0xffffffff
5954#define ROM_SW_DATA_46__ROM_SW_DATA__SHIFT 0x0
5955#define ROM_SW_DATA_47__ROM_SW_DATA_MASK 0xffffffff
5956#define ROM_SW_DATA_47__ROM_SW_DATA__SHIFT 0x0
5957#define ROM_SW_DATA_48__ROM_SW_DATA_MASK 0xffffffff
5958#define ROM_SW_DATA_48__ROM_SW_DATA__SHIFT 0x0
5959#define ROM_SW_DATA_49__ROM_SW_DATA_MASK 0xffffffff
5960#define ROM_SW_DATA_49__ROM_SW_DATA__SHIFT 0x0
5961#define ROM_SW_DATA_50__ROM_SW_DATA_MASK 0xffffffff
5962#define ROM_SW_DATA_50__ROM_SW_DATA__SHIFT 0x0
5963#define ROM_SW_DATA_51__ROM_SW_DATA_MASK 0xffffffff
5964#define ROM_SW_DATA_51__ROM_SW_DATA__SHIFT 0x0
5965#define ROM_SW_DATA_52__ROM_SW_DATA_MASK 0xffffffff
5966#define ROM_SW_DATA_52__ROM_SW_DATA__SHIFT 0x0
5967#define ROM_SW_DATA_53__ROM_SW_DATA_MASK 0xffffffff
5968#define ROM_SW_DATA_53__ROM_SW_DATA__SHIFT 0x0
5969#define ROM_SW_DATA_54__ROM_SW_DATA_MASK 0xffffffff
5970#define ROM_SW_DATA_54__ROM_SW_DATA__SHIFT 0x0
5971#define ROM_SW_DATA_55__ROM_SW_DATA_MASK 0xffffffff
5972#define ROM_SW_DATA_55__ROM_SW_DATA__SHIFT 0x0
5973#define ROM_SW_DATA_56__ROM_SW_DATA_MASK 0xffffffff
5974#define ROM_SW_DATA_56__ROM_SW_DATA__SHIFT 0x0
5975#define ROM_SW_DATA_57__ROM_SW_DATA_MASK 0xffffffff
5976#define ROM_SW_DATA_57__ROM_SW_DATA__SHIFT 0x0
5977#define ROM_SW_DATA_58__ROM_SW_DATA_MASK 0xffffffff
5978#define ROM_SW_DATA_58__ROM_SW_DATA__SHIFT 0x0
5979#define ROM_SW_DATA_59__ROM_SW_DATA_MASK 0xffffffff
5980#define ROM_SW_DATA_59__ROM_SW_DATA__SHIFT 0x0
5981#define ROM_SW_DATA_60__ROM_SW_DATA_MASK 0xffffffff
5982#define ROM_SW_DATA_60__ROM_SW_DATA__SHIFT 0x0
5983#define ROM_SW_DATA_61__ROM_SW_DATA_MASK 0xffffffff
5984#define ROM_SW_DATA_61__ROM_SW_DATA__SHIFT 0x0
5985#define ROM_SW_DATA_62__ROM_SW_DATA_MASK 0xffffffff
5986#define ROM_SW_DATA_62__ROM_SW_DATA__SHIFT 0x0
5987#define ROM_SW_DATA_63__ROM_SW_DATA_MASK 0xffffffff
5988#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0
5989#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff
5990#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
5991#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0xf
5992#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
5993#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
5994#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
5995#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000
5996#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
5997#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000
5998#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
5999#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0xf
6000#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
6001#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
6002#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
6003#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000
6004#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
6005#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000
6006#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
6007#define GC_CAC_LKG_AGGR_LOWER__LKG_AGGR_31_0_MASK 0xffffffff
6008#define GC_CAC_LKG_AGGR_LOWER__LKG_AGGR_31_0__SHIFT 0x0
6009#define GC_CAC_LKG_AGGR_UPPER__LKG_AGGR_63_32_MASK 0xffffffff
6010#define GC_CAC_LKG_AGGR_UPPER__LKG_AGGR_63_32__SHIFT 0x0
6011#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0_MASK 0xffff
6012#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0__SHIFT 0x0
6013#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1_MASK 0xffff0000
6014#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1__SHIFT 0x10
6015#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2_MASK 0xffff
6016#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2__SHIFT 0x0
6017#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3_MASK 0xffff0000
6018#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3__SHIFT 0x10
6019#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4_MASK 0xffff
6020#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4__SHIFT 0x0
6021#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5_MASK 0xffff0000
6022#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5__SHIFT 0x10
6023#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6_MASK 0xffff
6024#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6__SHIFT 0x0
6025#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7_MASK 0xffff0000
6026#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7__SHIFT 0x10
6027#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8_MASK 0xffff
6028#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8__SHIFT 0x0
6029#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9_MASK 0xffff0000
6030#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9__SHIFT 0x10
6031#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10_MASK 0xffff
6032#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10__SHIFT 0x0
6033#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11_MASK 0xffff0000
6034#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11__SHIFT 0x10
6035#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG12_MASK 0xffff
6036#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG12__SHIFT 0x0
6037#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG13_MASK 0xffff0000
6038#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG13__SHIFT 0x10
6039#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG14_MASK 0xffff
6040#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG14__SHIFT 0x0
6041#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG15_MASK 0xffff0000
6042#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG15__SHIFT 0x10
6043#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0_MASK 0xffffffff
6044#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0__SHIFT 0x0
6045#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0_MASK 0xffffffff
6046#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0__SHIFT 0x0
6047#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0_MASK 0xffffffff
6048#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0__SHIFT 0x0
6049#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0_MASK 0xffffffff
6050#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0__SHIFT 0x0
6051#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0_MASK 0xffffffff
6052#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0__SHIFT 0x0
6053#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0_MASK 0xffffffff
6054#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0__SHIFT 0x0
6055#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0_MASK 0xffffffff
6056#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0__SHIFT 0x0
6057#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0_MASK 0xffffffff
6058#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0__SHIFT 0x0
6059#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0_MASK 0xffffffff
6060#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0__SHIFT 0x0
6061#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0_MASK 0xffffffff
6062#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0__SHIFT 0x0
6063#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0_MASK 0xffffffff
6064#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0__SHIFT 0x0
6065#define GC_CAC_ACC_CU11__ACCUMULATOR_31_0_MASK 0xffffffff
6066#define GC_CAC_ACC_CU11__ACCUMULATOR_31_0__SHIFT 0x0
6067#define GC_CAC_ACC_CU12__ACCUMULATOR_31_0_MASK 0xffffffff
6068#define GC_CAC_ACC_CU12__ACCUMULATOR_31_0__SHIFT 0x0
6069#define GC_CAC_ACC_CU13__ACCUMULATOR_31_0_MASK 0xffffffff
6070#define GC_CAC_ACC_CU13__ACCUMULATOR_31_0__SHIFT 0x0
6071#define GC_CAC_ACC_CU14__ACCUMULATOR_31_0_MASK 0xffffffff
6072#define GC_CAC_ACC_CU14__ACCUMULATOR_31_0__SHIFT 0x0
6073#define GC_CAC_ACC_CU15__ACCUMULATOR_31_0_MASK 0xffffffff
6074#define GC_CAC_ACC_CU15__ACCUMULATOR_31_0__SHIFT 0x0
6075#define GC_CAC_OVRD_CU__OVRRD_SELECT_MASK 0xffff
6076#define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0
6077#define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0xffff0000
6078#define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10
6079
6080#endif /* SMU_7_1_3_SH_MASK_H */
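
The field definitions above come in _MASK/__SHIFT pairs and are consumed with the usual read-modify-write pattern. A minimal sketch, assuming hypothetical REG_READ/REG_WRITE MMIO accessors and an mmPWR_CKS_CNTL offset from the matching register-offset header (only the _MASK/__SHIFT defines are from this file):

	static uint32_t reg_set_field(uint32_t reg, uint32_t mask,
				      uint32_t shift, uint32_t value)
	{
		reg &= ~mask;			/* clear the field */
		reg |= (value << shift) & mask;	/* insert the new value */
		return reg;
	}

	static void pwr_cks_set_stretch(uint32_t amount)
	{
		uint32_t v = REG_READ(mmPWR_CKS_CNTL);

		v = reg_set_field(v, PWR_CKS_CNTL__CKS_STRETCH_AMOUNT_MASK,
				  PWR_CKS_CNTL__CKS_STRETCH_AMOUNT__SHIFT, amount);
		REG_WRITE(mmPWR_CKS_CNTL, v);
	}
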
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-bits.h b/drivers/gpu/drm/amd/include/atom-bits.h
index e8fae5c77514..e8fae5c77514 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom-bits.h
+++ b/drivers/gpu/drm/amd/include/atom-bits.h
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-names.h b/drivers/gpu/drm/amd/include/atom-names.h
index 6f907a5ffa5f..6f907a5ffa5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom-names.h
+++ b/drivers/gpu/drm/amd/include/atom-names.h
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-types.h b/drivers/gpu/drm/amd/include/atom-types.h
index 1125b866cdb0..1125b866cdb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom-types.h
+++ b/drivers/gpu/drm/amd/include/atom-types.h
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 44c5d4a4d1bf..44c5d4a4d1bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
new file mode 100644
index 000000000000..992dcd8a5c6a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -0,0 +1,624 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 *
23 */
24#ifndef _CGS_COMMON_H
25#define _CGS_COMMON_H
26
27#include "amd_shared.h"
28
29/**
30 * enum cgs_gpu_mem_type - GPU memory types
31 */
32enum cgs_gpu_mem_type {
33 CGS_GPU_MEM_TYPE__VISIBLE_FB,
34 CGS_GPU_MEM_TYPE__INVISIBLE_FB,
35 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
36 CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB,
37 CGS_GPU_MEM_TYPE__GART_CACHEABLE,
38 CGS_GPU_MEM_TYPE__GART_WRITECOMBINE
39};
40
41/**
42 * enum cgs_ind_reg - Indirect register spaces
43 */
44enum cgs_ind_reg {
45 CGS_IND_REG__MMIO,
46 CGS_IND_REG__PCIE,
47 CGS_IND_REG__SMC,
48 CGS_IND_REG__UVD_CTX,
49 CGS_IND_REG__DIDT,
50 CGS_IND_REG__AUDIO_ENDPT
51};
52
53/**
54 * enum cgs_clock - Clocks controlled by the SMU
55 */
56enum cgs_clock {
57 CGS_CLOCK__SCLK,
58 CGS_CLOCK__MCLK,
59 CGS_CLOCK__VCLK,
60 CGS_CLOCK__DCLK,
61 CGS_CLOCK__ECLK,
62 CGS_CLOCK__ACLK,
63 CGS_CLOCK__ICLK,
64 /* ... */
65};
66
67/**
68 * enum cgs_engine - Engines that can be statically power-gated
69 */
70enum cgs_engine {
71 CGS_ENGINE__UVD,
72 CGS_ENGINE__VCE,
73 CGS_ENGINE__VP8,
74 CGS_ENGINE__ACP_DMA,
75 CGS_ENGINE__ACP_DSP0,
76 CGS_ENGINE__ACP_DSP1,
77 CGS_ENGINE__ISP,
78 /* ... */
79};
80
81/**
82 * enum cgs_voltage_planes - Voltage planes for external camera HW
83 */
84enum cgs_voltage_planes {
85 CGS_VOLTAGE_PLANE__SENSOR0,
86 CGS_VOLTAGE_PLANE__SENSOR1,
87 /* ... */
88};
89
90/**
91 * enum cgs_ucode_id - Firmware types for different IPs
92 */
93enum cgs_ucode_id {
94 CGS_UCODE_ID_SMU = 0,
95 CGS_UCODE_ID_SDMA0,
96 CGS_UCODE_ID_SDMA1,
97 CGS_UCODE_ID_CP_CE,
98 CGS_UCODE_ID_CP_PFP,
99 CGS_UCODE_ID_CP_ME,
100 CGS_UCODE_ID_CP_MEC,
101 CGS_UCODE_ID_CP_MEC_JT1,
102 CGS_UCODE_ID_CP_MEC_JT2,
103 CGS_UCODE_ID_GMCON_RENG,
104 CGS_UCODE_ID_RLC_G,
105 CGS_UCODE_ID_MAXIMUM,
106};
107
108/**
109 * struct cgs_clock_limits - Clock limits
110 *
111 * Clocks are specified in 10KHz units.
112 */
113struct cgs_clock_limits {
114 unsigned min; /**< Minimum supported frequency */
115 unsigned max; /**< Maximum supported frequency */
116 unsigned sustainable; /**< Thermally sustainable frequency */
117};
118
119/**
120 * struct cgs_firmware_info - Firmware information
121 */
122struct cgs_firmware_info {
123 uint16_t version;
124 uint16_t feature_version;
125 uint32_t image_size;
126 uint64_t mc_addr;
127 void *kptr;
128};
129
130typedef unsigned long cgs_handle_t;
131
132/**
133 * cgs_gpu_mem_info() - Return information about memory heaps
134 * @cgs_device: opaque device handle
135 * @type: memory type
136 * @mc_start: Start MC address of the heap (output)
137 * @mc_size: MC address space size (output)
138 * @mem_size: maximum amount of memory available for allocation (output)
139 *
140 * This function returns information about memory heaps. The type
141 * parameter is used to select the memory heap. The mc_start and
142 * mc_size for GART heaps may be bigger than the memory available for
143 * allocation.
144 *
145 * mc_start and mc_size are undefined for non-contiguous FB memory
146 * types, since buffers allocated with these types may or may not be
147 * GART mapped.
148 *
149 * Return: 0 on success, -errno otherwise
150 */
151typedef int (*cgs_gpu_mem_info_t)(void *cgs_device, enum cgs_gpu_mem_type type,
152 uint64_t *mc_start, uint64_t *mc_size,
153 uint64_t *mem_size);
154
155/**
156 * cgs_gmap_kmem() - map kernel memory to GART aperture
157 * @cgs_device: opaque device handle
158 * @kmem: pointer to kernel memory
159 * @size: size to map
160 * @min_offset: minimum offset from start of GART aperture
161 * @max_offset: maximum offset from start of GART aperture
162 * @kmem_handle: kernel memory handle (output)
163 * @mcaddr: MC address (output)
164 *
165 * Return: 0 on success, -errno otherwise
166 */
167typedef int (*cgs_gmap_kmem_t)(void *cgs_device, void *kmem, uint64_t size,
168 uint64_t min_offset, uint64_t max_offset,
169 cgs_handle_t *kmem_handle, uint64_t *mcaddr);
170
171/**
172 * cgs_gunmap_kmem() - unmap kernel memory
173 * @cgs_device: opaque device handle
174 * @kmem_handle: kernel memory handle returned by gmap_kmem
175 *
176 * Return: 0 on success, -errno otherwise
177 */
178typedef int (*cgs_gunmap_kmem_t)(void *cgs_device, cgs_handle_t kmem_handle);
179
180/**
181 * cgs_alloc_gpu_mem() - Allocate GPU memory
182 * @cgs_device: opaque device handle
183 * @type: memory type
184 * @size: size in bytes
185 * @align: alignment in bytes
186 * @min_offset: minimum offset from start of heap
187 * @max_offset: maximum offset from start of heap
188 * @handle: memory handle (output)
189 *
190 * The memory types CGS_GPU_MEM_TYPE_*_CONTIG_FB force contiguous
191 * memory allocation. This guarantees that the MC address returned by
192 * cgs_gmap_gpu_mem is not mapped through the GART. The non-contiguous
193 * FB memory types may be GART mapped depending on memory
194 * fragmentation and memory allocator policies.
195 *
196 * If min/max_offset are non-0, the allocation will be forced to
197 * reside between these offsets in its respective memory heap. The
198 * base address that the offset relates to depends on the memory
199 * type.
200 *
201 * - CGS_GPU_MEM_TYPE__*_CONTIG_FB: FB MC base address
202 * - CGS_GPU_MEM_TYPE__GART_*: GART aperture base address
203 * - others: undefined, don't use with max_offset
204 *
205 * Return: 0 on success, -errno otherwise
206 */
207typedef int (*cgs_alloc_gpu_mem_t)(void *cgs_device, enum cgs_gpu_mem_type type,
208 uint64_t size, uint64_t align,
209 uint64_t min_offset, uint64_t max_offset,
210 cgs_handle_t *handle);
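
Together with the map calls declared below, the intended allocation flow looks roughly like this. A hedged sketch using the convenience macros from the end of this header; the size, alignment, and GART type are illustrative:

	static int example_alloc(void *cgs_device)
	{
		cgs_handle_t handle;
		uint64_t mcaddr;
		void *cpu_ptr;

		if (cgs_alloc_gpu_mem(cgs_device, CGS_GPU_MEM_TYPE__GART_CACHEABLE,
				      4096, 4096, 0, 0, &handle))
			return -ENOMEM;
		/* pin the buffer and obtain its MC address for the GPU */
		if (cgs_gmap_gpu_mem(cgs_device, handle, &mcaddr)) {
			cgs_free_gpu_mem(cgs_device, handle);
			return -EINVAL;
		}
		/* optional CPU-visible mapping */
		if (cgs_kmap_gpu_mem(cgs_device, handle, &cpu_ptr)) {
			cgs_gunmap_gpu_mem(cgs_device, handle);
			cgs_free_gpu_mem(cgs_device, handle);
			return -EINVAL;
		}
		/* ... use mcaddr on the GPU and cpu_ptr on the CPU ... */
		return 0;
	}
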
211
212/**
213 * cgs_free_gpu_mem() - Free GPU memory
214 * @cgs_device: opaque device handle
215 * @handle: memory handle returned by alloc or import
216 *
217 * Return: 0 on success, -errno otherwise
218 */
219typedef int (*cgs_free_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
220
221/**
222 * cgs_gmap_gpu_mem() - GPU-map GPU memory
223 * @cgs_device: opaque device handle
224 * @handle: memory handle returned by alloc or import
225 * @mcaddr: MC address (output)
226 *
227 * Ensures that a buffer is GPU accessible and returns its MC address.
228 *
229 * Return: 0 on success, -errno otherwise
230 */
231typedef int (*cgs_gmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
232 uint64_t *mcaddr);
233
234/**
235 * cgs_gunmap_gpu_mem() - GPU-unmap GPU memory
236 * @cgs_device: opaque device handle
237 * @handle: memory handle returned by alloc or import
238 *
239 * Allows the buffer to be migrated while it's not used by the GPU.
240 *
241 * Return: 0 on success, -errno otherwise
242 */
243typedef int (*cgs_gunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
244
245/**
246 * cgs_kmap_gpu_mem() - Kernel-map GPU memory
247 *
248 * @cgs_device: opaque device handle
249 * @handle: memory handle returned by alloc or import
250 * @map: Kernel virtual address the memory was mapped to (output)
251 *
252 * Return: 0 on success, -errno otherwise
253 */
254typedef int (*cgs_kmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
255 void **map);
256
257/**
258 * cgs_kunmap_gpu_mem() - Kernel-unmap GPU memory
259 * @cgs_device: opaque device handle
260 * @handle: memory handle returned by alloc or import
261 *
262 * Return: 0 on success, -errno otherwise
263 */
264typedef int (*cgs_kunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
265
266/**
267 * cgs_read_register() - Read an MMIO register
268 * @cgs_device: opaque device handle
269 * @offset: register offset
270 *
271 * Return: register value
272 */
273typedef uint32_t (*cgs_read_register_t)(void *cgs_device, unsigned offset);
274
275/**
276 * cgs_write_register() - Write an MMIO register
277 * @cgs_device: opaque device handle
278 * @offset: register offset
279 * @value: register value
280 */
281typedef void (*cgs_write_register_t)(void *cgs_device, unsigned offset,
282 uint32_t value);
283
284/**
285 * cgs_read_ind_register() - Read an indirect register
286 * @cgs_device: opaque device handle
287 * @space: indirect register space, @index: register index
288 *
289 * Return: register value
290 */
291typedef uint32_t (*cgs_read_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
292 unsigned index);
293
294/**
295 * cgs_write_ind_register() - Write an indirect register
296 * @cgs_device: opaque device handle
297 * @space: indirect register space, @index: register index
298 * @value: register value
299 */
300typedef void (*cgs_write_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
301 unsigned index, uint32_t value);
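
For example, a read-modify-write of a field in SMC indirect register space might look like the sketch below, written with the convenience macros defined later in this header; ixROM_CNTL is assumed to come from the matching register-offset header, while the ROM_CNTL field defines appear in the sh_mask header above:

	static void rom_clock_gating_enable(void *cgs_device)
	{
		uint32_t val;

		val = cgs_read_ind_register(cgs_device, CGS_IND_REG__SMC, ixROM_CNTL);
		val &= ~ROM_CNTL__CLOCK_GATING_EN_MASK;
		val |= 1 << ROM_CNTL__CLOCK_GATING_EN__SHIFT;
		cgs_write_ind_register(cgs_device, CGS_IND_REG__SMC, ixROM_CNTL, val);
	}
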
302
303/**
304 * cgs_read_pci_config_byte() - Read byte from PCI configuration space
305 * @cgs_device: opaque device handle
306 * @addr: address
307 *
308 * Return: Value read
309 */
310typedef uint8_t (*cgs_read_pci_config_byte_t)(void *cgs_device, unsigned addr);
311
312/**
313 * cgs_read_pci_config_word() - Read word from PCI configuration space
314 * @cgs_device: opaque device handle
315 * @addr: address, must be word-aligned
316 *
317 * Return: Value read
318 */
319typedef uint16_t (*cgs_read_pci_config_word_t)(void *cgs_device, unsigned addr);
320
321/**
322 * cgs_read_pci_config_dword() - Read dword from PCI configuration space
323 * @cgs_device: opaque device handle
324 * @addr: address, must be dword-aligned
325 *
326 * Return: Value read
327 */
328typedef uint32_t (*cgs_read_pci_config_dword_t)(void *cgs_device,
329 unsigned addr);
330
331/**
332 * cgs_write_pci_config_byte() - Write byte to PCI configuration space
333 * @cgs_device: opaque device handle
334 * @addr: address
335 * @value: value to write
336 */
337typedef void (*cgs_write_pci_config_byte_t)(void *cgs_device, unsigned addr,
338 uint8_t value);
339
340/**
341 * cgs_write_pci_config_word() - Write word to PCI configuration space
342 * @cgs_device: opaque device handle
343 * @addr: address, must be word-aligned
344 * @value: value to write
345 */
346typedef void (*cgs_write_pci_config_word_t)(void *cgs_device, unsigned addr,
347 uint16_t value);
348
349/**
350 * cgs_write_pci_config_dword() - Write dword to PCI configuration space
351 * @cgs_device: opaque device handle
352 * @addr: address, must be dword-aligned
353 * @value: value to write
354 */
355typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr,
356 uint32_t value);
357
358/**
359 * cgs_atom_get_data_table() - Get a pointer to an ATOM BIOS data table
360 * @cgs_device: opaque device handle
361 * @table: data table index
362 * @size: size of the table (output, may be NULL)
363 * @frev: table format revision (output, may be NULL)
364 * @crev: table content revision (output, may be NULL)
365 *
366 * Return: Pointer to start of the table, or NULL on failure
367 */
368typedef const void *(*cgs_atom_get_data_table_t)(
369 void *cgs_device, unsigned table,
370 uint16_t *size, uint8_t *frev, uint8_t *crev);
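
A typical caller fetches a table and sanity-checks its revision before parsing. A sketch under those assumptions; the table index and expected format revision are illustrative, not defined here:

	static const void *get_table_checked(void *cgs_device, unsigned index,
					     uint8_t want_frev)
	{
		uint16_t size;
		uint8_t frev, crev;
		const void *table;

		table = cgs_atom_get_data_table(cgs_device, index,
						&size, &frev, &crev);
		if (!table || frev != want_frev)
			return NULL;	/* missing, or a layout this parser doesn't know */
		return table;
	}
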
371
372/**
373 * cgs_atom_get_cmd_table_revs() - Get ATOM BIOS command table revisions
374 * @cgs_device: opaque device handle
375 * @table: data table index
376 * @frev: table format revision (output, may be NULL)
377 * @crev: table content revision (output, may be NULL)
378 *
379 * Return: 0 on success, -errno otherwise
380 */
381typedef int (*cgs_atom_get_cmd_table_revs_t)(void *cgs_device, unsigned table,
382 uint8_t *frev, uint8_t *crev);
383
384/**
385 * cgs_atom_exec_cmd_table() - Execute an ATOM BIOS command table
386 * @cgs_device: opaque device handle
387 * @table: command table index
388 * @args: arguments
389 *
390 * Return: 0 on success, -errno otherwise
391 */
392typedef int (*cgs_atom_exec_cmd_table_t)(void *cgs_device,
393 unsigned table, void *args);
394
395/**
396 * cgs_create_pm_request() - Create a power management request
397 * @cgs_device: opaque device handle
398 * @request: handle of created PM request (output)
399 *
400 * Return: 0 on success, -errno otherwise
401 */
402typedef int (*cgs_create_pm_request_t)(void *cgs_device, cgs_handle_t *request);
403
404/**
405 * cgs_destroy_pm_request() - Destroy a power management request
406 * @cgs_device: opaque device handle
407 * @request: handle of created PM request
408 *
409 * Return: 0 on success, -errno otherwise
410 */
411typedef int (*cgs_destroy_pm_request_t)(void *cgs_device, cgs_handle_t request);
412
413/**
414 * cgs_set_pm_request() - Activate or deactivate a PM request
415 * @cgs_device: opaque device handle
416 * @request: PM request handle
417 * @active: 0 = deactivate, non-0 = activate
418 *
419 * While a PM request is active, its minimum clock requests are taken
420 * into account as the requested engines are powered up. When the
421 * request is inactive, the engines may be powered down and clocks may
422 * be lower, depending on other PM requests by other driver
423 * components.
424 *
425 * Return: 0 on success, -errno otherwise
426 */
427typedef int (*cgs_set_pm_request_t)(void *cgs_device, cgs_handle_t request,
428 int active);
429
430/**
431 * cgs_pm_request_clock() - Request a minimum frequency for a specific clock
432 * @cgs_device: opaque device handle
433 * @request: PM request handle
434 * @clock: which clock?
435 * @freq: requested min. frequency in 10KHz units (0 to clear request)
436 *
437 * Return: 0 on success, -errno otherwise
438 */
439typedef int (*cgs_pm_request_clock_t)(void *cgs_device, cgs_handle_t request,
440 enum cgs_clock clock, unsigned freq);
441
442/**
443 * cgs_pm_request_engine() - Request an engine to be powered up
444 * @cgs_device: opaque device handle
445 * @request: PM request handle
446 * @engine: which engine?
447 * @powered: 0 = powered down, non-0 = powered up
448 *
449 * Return: 0 on success, -errno otherwise
450 */
451typedef int (*cgs_pm_request_engine_t)(void *cgs_device, cgs_handle_t request,
452 enum cgs_engine engine, int powered);
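
The PM request calls above compose into a simple lifecycle: create a request, attach clock and engine demands, then toggle it active around the workload. A sketch via the convenience macros below; the clock choice and frequency (in the 10KHz units used throughout this header) are illustrative:

	static int run_with_pm_request(void *cgs_device)
	{
		cgs_handle_t req;

		if (cgs_create_pm_request(cgs_device, &req))
			return -EINVAL;
		cgs_pm_request_clock(cgs_device, req, CGS_CLOCK__ECLK, 60000); /* 600 MHz */
		cgs_pm_request_engine(cgs_device, req, CGS_ENGINE__VCE, 1);
		cgs_set_pm_request(cgs_device, req, 1);	/* demands take effect */
		/* ... run the workload ... */
		cgs_set_pm_request(cgs_device, req, 0);	/* engine may power down again */
		cgs_destroy_pm_request(cgs_device, req);
		return 0;
	}
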
453
454/**
455 * cgs_pm_query_clock_limits() - Query clock frequency limits
456 * @cgs_device: opaque device handle
457 * @clock: which clock?
458 * @limits: clock limits
459 *
460 * Return: 0 on success, -errno otherwise
461 */
462typedef int (*cgs_pm_query_clock_limits_t)(void *cgs_device,
463 enum cgs_clock clock,
464 struct cgs_clock_limits *limits);
465
466/**
467 * cgs_set_camera_voltages() - Apply specific voltages to PMIC voltage planes
468 * @cgs_device: opaque device handle
469 * @mask: bitmask of voltages to change (1<<CGS_VOLTAGE_PLANE__xyz|...)
470 * @voltages: pointer to array of voltage values in 1mV units
471 *
472 * Return: 0 on success, -errno otherwise
473 */
474typedef int (*cgs_set_camera_voltages_t)(void *cgs_device, uint32_t mask,
475 const uint32_t *voltages);
476/**
477 * cgs_get_firmware_info() - Get firmware information from the core driver
478 * @cgs_device: opaque device handle
479 * @type: the firmware type
480 * @info: returned firmware information
481 *
482 * Return: 0 on success, -errno otherwise
483 */
484typedef int (*cgs_get_firmware_info)(void *cgs_device,
485 enum cgs_ucode_id type,
486 struct cgs_firmware_info *info);
487
488typedef int (*cgs_set_powergating_state)(void *cgs_device,
489 enum amd_ip_block_type block_type,
490 enum amd_powergating_state state);
491
492typedef int (*cgs_set_clockgating_state)(void *cgs_device,
493 enum amd_ip_block_type block_type,
494 enum amd_clockgating_state state);
495
496struct cgs_ops {
497 /* memory management calls (similar to KFD interface) */
498 cgs_gpu_mem_info_t gpu_mem_info;
499 cgs_gmap_kmem_t gmap_kmem;
500 cgs_gunmap_kmem_t gunmap_kmem;
501 cgs_alloc_gpu_mem_t alloc_gpu_mem;
502 cgs_free_gpu_mem_t free_gpu_mem;
503 cgs_gmap_gpu_mem_t gmap_gpu_mem;
504 cgs_gunmap_gpu_mem_t gunmap_gpu_mem;
505 cgs_kmap_gpu_mem_t kmap_gpu_mem;
506 cgs_kunmap_gpu_mem_t kunmap_gpu_mem;
507 /* MMIO access */
508 cgs_read_register_t read_register;
509 cgs_write_register_t write_register;
510 cgs_read_ind_register_t read_ind_register;
511 cgs_write_ind_register_t write_ind_register;
512 /* PCI configuration space access */
513 cgs_read_pci_config_byte_t read_pci_config_byte;
514 cgs_read_pci_config_word_t read_pci_config_word;
515 cgs_read_pci_config_dword_t read_pci_config_dword;
516 cgs_write_pci_config_byte_t write_pci_config_byte;
517 cgs_write_pci_config_word_t write_pci_config_word;
518 cgs_write_pci_config_dword_t write_pci_config_dword;
519 /* ATOM BIOS */
520 cgs_atom_get_data_table_t atom_get_data_table;
521 cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs;
522 cgs_atom_exec_cmd_table_t atom_exec_cmd_table;
523 /* Power management */
524 cgs_create_pm_request_t create_pm_request;
525 cgs_destroy_pm_request_t destroy_pm_request;
526 cgs_set_pm_request_t set_pm_request;
527 cgs_pm_request_clock_t pm_request_clock;
528 cgs_pm_request_engine_t pm_request_engine;
529 cgs_pm_query_clock_limits_t pm_query_clock_limits;
530 cgs_set_camera_voltages_t set_camera_voltages;
531 /* Firmware Info */
532 cgs_get_firmware_info get_firmware_info;
533 /* cg pg interface*/
534 cgs_set_powergating_state set_powergating_state;
535 cgs_set_clockgating_state set_clockgating_state;
536 /* ACPI (TODO) */
537};
538
539struct cgs_os_ops; /* To be defined in an OS-specific CGS header */
540
541struct cgs_device
542{
543 const struct cgs_ops *ops;
544 const struct cgs_os_ops *os_ops;
545 /* to be embedded at the start of driver private structure */
546};
547
548/* Convenience macros that make CGS indirect function calls look like
549 * normal function calls */
550#define CGS_CALL(func,dev,...) \
551 (((struct cgs_device *)dev)->ops->func(dev, ##__VA_ARGS__))
552#define CGS_OS_CALL(func,dev,...) \
553 (((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
554
555#define cgs_gpu_mem_info(dev,type,mc_start,mc_size,mem_size) \
556 CGS_CALL(gpu_mem_info,dev,type,mc_start,mc_size,mem_size)
557#define cgs_gmap_kmem(dev,kmem,size,min_off,max_off,kmem_handle,mcaddr) \
558 CGS_CALL(gmap_kmem,dev,kmem,size,min_off,max_off,kmem_handle,mcaddr)
559#define cgs_gunmap_kmem(dev,kmem_handle) \
560 CGS_CALL(gunmap_kmem,dev,kmem_handle)
561#define cgs_alloc_gpu_mem(dev,type,size,align,min_off,max_off,handle) \
562 CGS_CALL(alloc_gpu_mem,dev,type,size,align,min_off,max_off,handle)
563#define cgs_free_gpu_mem(dev,handle) \
564 CGS_CALL(free_gpu_mem,dev,handle)
565#define cgs_gmap_gpu_mem(dev,handle,mcaddr) \
566 CGS_CALL(gmap_gpu_mem,dev,handle,mcaddr)
567#define cgs_gunmap_gpu_mem(dev,handle) \
568 CGS_CALL(gunmap_gpu_mem,dev,handle)
569#define cgs_kmap_gpu_mem(dev,handle,map) \
570 CGS_CALL(kmap_gpu_mem,dev,handle,map)
571#define cgs_kunmap_gpu_mem(dev,handle) \
572 CGS_CALL(kunmap_gpu_mem,dev,handle)
573
574#define cgs_read_register(dev,offset) \
575 CGS_CALL(read_register,dev,offset)
576#define cgs_write_register(dev,offset,value) \
577 CGS_CALL(write_register,dev,offset,value)
578#define cgs_read_ind_register(dev,space,index) \
579 CGS_CALL(read_ind_register,dev,space,index)
580#define cgs_write_ind_register(dev,space,index,value) \
581 CGS_CALL(write_ind_register,dev,space,index,value)
582
583#define cgs_read_pci_config_byte(dev,addr) \
584 CGS_CALL(read_pci_config_byte,dev,addr)
585#define cgs_read_pci_config_word(dev,addr) \
586 CGS_CALL(read_pci_config_word,dev,addr)
587#define cgs_read_pci_config_dword(dev,addr) \
588 CGS_CALL(read_pci_config_dword,dev,addr)
589#define cgs_write_pci_config_byte(dev,addr,value) \
590 CGS_CALL(write_pci_config_byte,dev,addr,value)
591#define cgs_write_pci_config_word(dev,addr,value) \
592 CGS_CALL(write_pci_config_word,dev,addr,value)
593#define cgs_write_pci_config_dword(dev,addr,value) \
594 CGS_CALL(write_pci_config_dword,dev,addr,value)
595
596#define cgs_atom_get_data_table(dev,table,size,frev,crev) \
597 CGS_CALL(atom_get_data_table,dev,table,size,frev,crev)
598#define cgs_atom_get_cmd_table_revs(dev,table,frev,crev) \
599 CGS_CALL(atom_get_cmd_table_revs,dev,table,frev,crev)
600#define cgs_atom_exec_cmd_table(dev,table,args) \
601 CGS_CALL(atom_exec_cmd_table,dev,table,args)
602
603#define cgs_create_pm_request(dev,request) \
604 CGS_CALL(create_pm_request,dev,request)
605#define cgs_destroy_pm_request(dev,request) \
606 CGS_CALL(destroy_pm_request,dev,request)
607#define cgs_set_pm_request(dev,request,active) \
608 CGS_CALL(set_pm_request,dev,request,active)
609#define cgs_pm_request_clock(dev,request,clock,freq) \
610 CGS_CALL(pm_request_clock,dev,request,clock,freq)
611#define cgs_pm_request_engine(dev,request,engine,powered) \
612 CGS_CALL(pm_request_engine,dev,request,engine,powered)
613#define cgs_pm_query_clock_limits(dev,clock,limits) \
614 CGS_CALL(pm_query_clock_limits,dev,clock,limits)
615#define cgs_set_camera_voltages(dev,mask,voltages) \
616 CGS_CALL(set_camera_voltages,dev,mask,voltages)
617#define cgs_get_firmware_info(dev, type, info) \
618 CGS_CALL(get_firmware_info, dev, type, info)
619#define cgs_set_powergating_state(dev, block_type, state) \
620 CGS_CALL(set_powergating_state, dev, block_type, state)
621#define cgs_set_clockgating_state(dev, block_type, state) \
622 CGS_CALL(set_clockgating_state, dev, block_type, state)
623
624#endif /* _CGS_COMMON_H */
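
Because struct cgs_device is meant to be embedded at the start of the driver's private structure, the cast inside CGS_CALL resolves correctly from any handle. A hypothetical implementation sketch; the names and the MMIO layout are illustrative, not part of this header:

	struct my_cgs_device {
		struct cgs_device base;	/* must remain the first member */
		void __iomem *mmio;
	};

	static uint32_t my_read_register(void *cgs_device, unsigned offset)
	{
		struct my_cgs_device *dev = cgs_device;

		return readl(dev->mmio + 4 * offset);
	}

	static const struct cgs_ops my_ops = {
		.read_register = my_read_register,
		/* ... remaining ops ... */
	};

	/* cgs_read_register(&my_dev->base, offset) now dispatches to my_read_register */
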
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h
new file mode 100644
index 000000000000..488642f08267
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/cgs_linux.h
@@ -0,0 +1,135 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 *
23 */
24#ifndef _CGS_LINUX_H
25#define _CGS_LINUX_H
26
27#include "cgs_common.h"
28
29/**
30 * cgs_import_gpu_mem() - Import dmabuf handle
31 * @cgs_device: opaque device handle
32 * @dmabuf_fd: DMABuf file descriptor
33 * @handle: memory handle (output)
34 *
35 * Must be called in the process context that dmabuf_fd belongs to.
36 *
37 * Return: 0 on success, -errno otherwise
38 */
39typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd,
40 cgs_handle_t *handle);
41
42/**
43 * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
44 * @private_data: private data provided to cgs_add_irq_source
45 * @src_id: interrupt source ID
46 * @type: interrupt type
47 * @enabled: 0 = disable source, non-0 = enable source
48 *
49 * Return: 0 on success, -errno otherwise
50 */
51typedef int (*cgs_irq_source_set_func_t)(void *private_data,
52 unsigned src_id, unsigned type,
53 int enabled);
54
55/**
56 * cgs_irq_handler_func() - Interrupt handler callback
57 * @private_data: private data provided to cgs_add_irq_source
58 * @src_id: interrupt source ID
59 * @iv_entry: pointer to raw ih ring entry
60 *
61 * This callback runs in interrupt context.
62 *
63 * Return: 0 on success, -errno otherwise
64 */
65typedef int (*cgs_irq_handler_func_t)(void *private_data,
66 unsigned src_id, const uint32_t *iv_entry);
67
68/**
69 * cgs_add_irq_source() - Add an IRQ source
70 * @cgs_device: opaque device handle
71 * @src_id: interrupt source ID
72 * @num_types: number of interrupt types that can be independently enabled
73 * @set: callback function to enable/disable an interrupt type
74 * @handler: interrupt handler callback
75 * @private_data: private data to pass to callback functions
76 *
77 * The same IRQ source can be added only once. Adding an IRQ source
78 * indicates ownership of that IRQ source and all its IRQ types.
79 *
80 * Return: 0 on success, -errno otherwise
81 */
82typedef int (*cgs_add_irq_source_t)(void *cgs_device, unsigned src_id,
83 unsigned num_types,
84 cgs_irq_source_set_func_t set,
85 cgs_irq_handler_func_t handler,
86 void *private_data);
87
88/**
89 * cgs_irq_get() - Request enabling an IRQ source and type
90 * @cgs_device: opaque device handle
91 * @src_id: interrupt source ID
92 * @type: interrupt type
93 *
94 * cgs_irq_get and cgs_irq_put calls must be balanced. They count
95 * "references" to IRQ sources.
96 *
97 * Return: 0 on success, -errno otherwise
98 */
99typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
100
101/**
102 * cgs_irq_put() - Indicate IRQ source is no longer needed
103 * @cgs_device: opaque device handle
104 * @src_id: interrupt source ID
105 * @type: interrupt type
106 *
107 * cgs_irq_get and cgs_irq_put calls must be balanced. They count
108 * "references" to IRQ sources. Even after cgs_irq_put is called, the
109 * IRQ handler may still be called if there are more references to
110 * the IRQ source.
111 *
112 * Return: 0 on success, -errno otherwise
113 */
114typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
115
116struct cgs_os_ops {
117 cgs_import_gpu_mem_t import_gpu_mem;
118
119 /* IRQ handling */
120 cgs_add_irq_source_t add_irq_source;
121 cgs_irq_get_t irq_get;
122 cgs_irq_put_t irq_put;
123};
124
125#define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \
126 CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle)
127#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
128 CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \
129 private_data)
130#define cgs_irq_get(dev,src_id,type) \
131 CGS_OS_CALL(irq_get,dev,src_id,type)
132#define cgs_irq_put(dev,src_id,type) \
133 CGS_OS_CALL(irq_put,dev,src_id,type)
134
135#endif /* _CGS_LINUX_H */
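
Tying the IRQ pieces together, a component that owns an interrupt source registers it once and then takes balanced references. A sketch under stated assumptions; MY_SRC_ID and the callbacks are hypothetical:

	static int my_irq_set(void *priv, unsigned src_id, unsigned type,
			      int enabled)
	{
		/* mask or unmask this interrupt type in hardware */
		return 0;
	}

	static int my_irq_handler(void *priv, unsigned src_id,
				  const uint32_t *iv_entry)
	{
		/* interrupt context: decode iv_entry, defer heavy work */
		return 0;
	}

	static int my_component_init(void *cgs_device, void *priv)
	{
		int ret;

		ret = cgs_add_irq_source(cgs_device, MY_SRC_ID, 1, my_irq_set,
					 my_irq_handler, priv);
		if (ret)
			return ret;
		/* balanced by cgs_irq_put(cgs_device, MY_SRC_ID, 0) at teardown */
		return cgs_irq_get(cgs_device, MY_SRC_ID, 0);
	}
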
diff --git a/drivers/gpu/drm/amd/amdgpu/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
index 0030f726e68c..ee6978b30b77 100644
--- a/drivers/gpu/drm/amd/amdgpu/pptable.h
+++ b/drivers/gpu/drm/amd/include/pptable.h
@@ -146,6 +146,9 @@ typedef struct _ATOM_PPLIB_EXTENDEDHEADER
 #define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver supports VR HOT GPIO Configurable.
 #define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION 0x00400000 // Does the driver supports Temp Inversion feature.
 #define ATOM_PP_PLATFORM_CAP_EVV 0x00800000
+#define ATOM_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL 0x01000000
+#define ATOM_PP_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE 0x02000000
+#define ATOM_PP_PLATFORM_CAP_DISABLE_USING_ACTUAL_TEMPERATURE_FOR_POWER_CALC 0x04000000
 
 typedef struct _ATOM_PPLIB_POWERPLAYTABLE
 {
@@ -673,7 +676,8 @@ typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1
 	UCHAR revid;
 	ATOM_PowerTune_Table power_tune_table;
 	USHORT usMaximumPowerDeliveryLimit;
-	USHORT usReserve[7];
+	USHORT usTjMax;
+	USHORT usReserve[6];
 } ATOM_PPLIB_POWERTUNE_Table_V1;
 
 #define ATOM_PPM_A_A 1
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
new file mode 100644
index 000000000000..265d3e2f63cc
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -0,0 +1,462 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 *
23 */
24#include <linux/kthread.h>
25#include <linux/wait.h>
26#include <linux/sched.h>
27#include <drm/drmP.h>
28#include "gpu_scheduler.h"
29
30/* Initialize a given run queue struct */
31static void amd_sched_rq_init(struct amd_sched_rq *rq)
32{
33 INIT_LIST_HEAD(&rq->entities);
34 mutex_init(&rq->lock);
35 rq->current_entity = NULL;
36}
37
38static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
39 struct amd_sched_entity *entity)
40{
41 mutex_lock(&rq->lock);
42 list_add_tail(&entity->list, &rq->entities);
43 mutex_unlock(&rq->lock);
44}
45
46static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
47 struct amd_sched_entity *entity)
48{
49 mutex_lock(&rq->lock);
50 list_del_init(&entity->list);
51 if (rq->current_entity == entity)
52 rq->current_entity = NULL;
53 mutex_unlock(&rq->lock);
54}
55
56/**
57 * Select the next entity from a run queue using a round-robin policy.
58 * May return the same entity as the current one if it is the only one
59 * in the queue with pending jobs. Returns NULL if nothing is available.
60 */
61static struct amd_sched_entity *
62amd_sched_rq_select_entity(struct amd_sched_rq *rq)
63{
64 struct amd_sched_entity *entity = rq->current_entity;
65
66 if (entity) {
67 list_for_each_entry_continue(entity, &rq->entities, list) {
68 if (!kfifo_is_empty(&entity->job_queue)) {
69 rq->current_entity = entity;
70 return rq->current_entity;
71 }
72 }
73 }
74
75 list_for_each_entry(entity, &rq->entities, list) {
76
77 if (!kfifo_is_empty(&entity->job_queue)) {
78 rq->current_entity = entity;
79 return rq->current_entity;
80 }
81
82 if (entity == rq->current_entity)
83 break;
84 }
85
86 return NULL;
87}
88
89/**
90 * Note: This function should only be called from the scheduler main
91 * thread for thread safety; there is no other protection here.
92 * Returns true if the hardware queue has room for more submissions.
93 *
94 * For active_hw_rq there is only one producer (the scheduler thread)
95 * and one consumer (the ISR), so it is safe to use this function in the
96 * scheduler main thread to decide whether to continue emitting IBs.
97*/
98static bool is_scheduler_ready(struct amd_gpu_scheduler *sched)
99{
100 unsigned long flags;
101 bool full;
102
103 spin_lock_irqsave(&sched->queue_lock, flags);
104 full = atomic64_read(&sched->hw_rq_count) <
105 sched->hw_submission_limit ? true : false;
106 spin_unlock_irqrestore(&sched->queue_lock, flags);
107
108 return full;
109}
110
111/**
112 * Select the next entity from the kernel run queue; return NULL
113 * if none is available.
114*/
115static struct amd_sched_entity *
116kernel_rq_select_context(struct amd_gpu_scheduler *sched)
117{
118 struct amd_sched_entity *sched_entity;
119 struct amd_sched_rq *rq = &sched->kernel_rq;
120
121 mutex_lock(&rq->lock);
122 sched_entity = amd_sched_rq_select_entity(rq);
123 mutex_unlock(&rq->lock);
124 return sched_entity;
125}
126
127/**
128 * Select the next entity that has real IB submissions queued.
129*/
130static struct amd_sched_entity *
131select_context(struct amd_gpu_scheduler *sched)
132{
133 struct amd_sched_entity *wake_entity = NULL;
134 struct amd_sched_entity *tmp;
135 struct amd_sched_rq *rq;
136
137 if (!is_scheduler_ready(sched))
138 return NULL;
139
140 /* Kernel run queue has higher priority than normal run queue*/
141 tmp = kernel_rq_select_context(sched);
142 if (tmp != NULL)
143 goto exit;
144
145 rq = &sched->sched_rq;
146 mutex_lock(&rq->lock);
147 tmp = amd_sched_rq_select_entity(rq);
148 mutex_unlock(&rq->lock);
149exit:
150 if (sched->current_entity && (sched->current_entity != tmp))
151 wake_entity = sched->current_entity;
152 sched->current_entity = tmp;
153 if (wake_entity && wake_entity->need_wakeup)
154 wake_up(&wake_entity->wait_queue);
155 return tmp;
156}
157
158/**
159 * Initialize a context entity used by the scheduler when submitting
160 * to a HW ring.
161 *
162 * @sched The pointer to the scheduler
163 * @entity The pointer to a valid amd_sched_entity
164 * @rq The run queue this entity belongs to
165 * @jobs The max number of jobs in the job queue
166 *
167 * Return 0 on success, negative error code on failure.
168*/
169int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
170 struct amd_sched_entity *entity,
171 struct amd_sched_rq *rq,
172 uint32_t jobs)
173{
174 uint64_t seq_ring = 0;
175 char name[20];
176
177 if (!(sched && entity && rq))
178 return -EINVAL;
179
180 memset(entity, 0, sizeof(struct amd_sched_entity));
181 seq_ring = ((uint64_t)sched->ring_id) << 60;
182 spin_lock_init(&entity->lock);
183 entity->belongto_rq = rq;
184 entity->scheduler = sched;
185 init_waitqueue_head(&entity->wait_queue);
186 init_waitqueue_head(&entity->wait_emit);
187 entity->fence_context = fence_context_alloc(1);
188 snprintf(name, sizeof(name), "c_entity[%llu]", entity->fence_context);
189 memcpy(entity->name, name, 20);
190 entity->need_wakeup = false;
191 if(kfifo_alloc(&entity->job_queue,
192 jobs * sizeof(void *),
193 GFP_KERNEL))
194 return -EINVAL;
195
196 spin_lock_init(&entity->queue_lock);
197 atomic64_set(&entity->last_queued_v_seq, seq_ring);
198 atomic64_set(&entity->last_signaled_v_seq, seq_ring);
199
200 /* Add the entity to the run queue */
201 amd_sched_rq_add_entity(rq, entity);
202 return 0;
203}
204
205/**
206 * Query if entity is initialized
207 *
208 * @sched Pointer to scheduler instance
209 * @entity The pointer to a valid scheduler entity
210 *
211 * Return true if the entity is initialized, false otherwise.
212*/
213static bool is_context_entity_initialized(struct amd_gpu_scheduler *sched,
214 struct amd_sched_entity *entity)
215{
216 return entity->scheduler == sched &&
217 entity->belongto_rq != NULL;
218}
219
220static bool is_context_entity_idle(struct amd_gpu_scheduler *sched,
221 struct amd_sched_entity *entity)
222{
223 /**
224 * Idle means no pending IBs, and the entity is not
225 * currently being used.
226 */
227 barrier();
228 if ((sched->current_entity != entity) &&
229 kfifo_is_empty(&entity->job_queue))
230 return true;
231
232 return false;
233}
234
235/**
236 * Destroy a context entity
237 *
238 * @sched Pointer to scheduler instance
239 * @entity The pointer to a valid scheduler entity
240 *
241 * Return 0 on success, negative error code on failure.
242 */
243int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
244 struct amd_sched_entity *entity)
245{
246 int r = 0;
247 struct amd_sched_rq *rq = entity->belongto_rq;
248
249 if (!is_context_entity_initialized(sched, entity))
250 return 0;
251 entity->need_wakeup = true;
252 /**
253 * The client will not queue more IBs during this fini; consume the
254 * existing queued IBs.
255 */
256 r = wait_event_timeout(
257 entity->wait_queue,
258 is_context_entity_idle(sched, entity),
259 msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS)
260 ) ? 0 : -1;
261
262 if (r) {
263 if (entity->is_pending)
264 DRM_INFO("Entity %p is in waiting state during fini,\
265 all pending ibs will be canceled.\n",
266 entity);
267 }
268
269 amd_sched_rq_remove_entity(rq, entity);
270 kfifo_free(&entity->job_queue);
271 return r;
272}
273
274/**
275 * Submit a normal job to the job queue
276 *
277 * @sched The pointer to the scheduler
278 * @c_entity The pointer to amd_sched_entity
279 * @data The pointer to the job data to submit
280 * @fence Filled in with the fence for the new job
281 *
282 * Return 0 on success, -EINVAL on invalid arguments, -ENOMEM if the
283 * job allocation fails; blocks while the entity's job queue is full.
284*/
285int amd_sched_push_job(struct amd_gpu_scheduler *sched,
286 struct amd_sched_entity *c_entity,
287 void *data,
288 struct amd_sched_fence **fence)
289{
290 struct amd_sched_job *job;
291
292 if (!fence)
293 return -EINVAL;
294 job = kzalloc(sizeof(struct amd_sched_job), GFP_KERNEL);
295 if (!job)
296 return -ENOMEM;
297 job->sched = sched;
298 job->s_entity = c_entity;
299 job->data = data;
300 *fence = amd_sched_fence_create(c_entity);
301 if ((*fence) == NULL) {
302 kfree(job);
303 return -EINVAL;
304 }
305 fence_get(&(*fence)->base);
306 job->s_fence = *fence;
307 while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
308 &c_entity->queue_lock) != sizeof(void *)) {
309 /**
310 * The current context has used up all its IB slots;
311 * wait here. We may also need to check whether the GPU is hung.
312 */
313 schedule();
314 }
315 /* first job wake up scheduler */
316 if ((kfifo_len(&c_entity->job_queue) / sizeof(void *)) == 1)
317 wake_up_interruptible(&sched->wait_queue);
318 return 0;
319}
320
321static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
322{
323 struct amd_sched_job *sched_job =
324 container_of(cb, struct amd_sched_job, cb);
325 struct amd_gpu_scheduler *sched;
326 unsigned long flags;
327
328 sched = sched_job->sched;
329 atomic64_set(&sched_job->s_entity->last_signaled_v_seq,
330 sched_job->s_fence->v_seq);
331 amd_sched_fence_signal(sched_job->s_fence);
332 spin_lock_irqsave(&sched->queue_lock, flags);
333 list_del(&sched_job->list);
334 atomic64_dec(&sched->hw_rq_count);
335 spin_unlock_irqrestore(&sched->queue_lock, flags);
336
337 sched->ops->process_job(sched, sched_job);
338 fence_put(&sched_job->s_fence->base);
339 kfree(sched_job);
340 wake_up_interruptible(&sched->wait_queue);
341}
342
343static int amd_sched_main(void *param)
344{
345 int r;
346 struct amd_sched_job *job;
347 struct sched_param sparam = {.sched_priority = 1};
348 struct amd_sched_entity *c_entity = NULL;
349 struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
350
351 sched_setscheduler(current, SCHED_FIFO, &sparam);
352
353 while (!kthread_should_stop()) {
354 struct fence *fence;
355
356 wait_event_interruptible(sched->wait_queue,
357 is_scheduler_ready(sched) &&
358 (c_entity = select_context(sched)));
359 r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
360 if (r != sizeof(void *))
361 continue;
362 r = sched->ops->prepare_job(sched, c_entity, job);
363 if (!r) {
364 unsigned long flags;
365 spin_lock_irqsave(&sched->queue_lock, flags);
366 list_add_tail(&job->list, &sched->active_hw_rq);
367 atomic64_inc(&sched->hw_rq_count);
368 spin_unlock_irqrestore(&sched->queue_lock, flags);
369 }
370 mutex_lock(&sched->sched_lock);
371 fence = sched->ops->run_job(sched, c_entity, job);
372 if (fence) {
373 r = fence_add_callback(fence, &job->cb,
374 amd_sched_process_job);
375 if (r == -ENOENT)
376 amd_sched_process_job(fence, &job->cb);
377 else if (r)
378 DRM_ERROR("fence add callback failed (%d)\n", r);
379 fence_put(fence);
380 }
381 mutex_unlock(&sched->sched_lock);
382 }
383 return 0;
384}
385
386/**
387 * Create a gpu scheduler
388 *
389 * @device The device context for this scheduler
390 * @ops The backend operations for this scheduler.
391 * @ring The ring id; one scheduler is created per ring.
392 * @granularity The minimum time unit, in ms, at which the scheduler runs.
393 * @preemption Whether this ring supports preemption; 0 means no.
394 * @hw_submission The max number of jobs allowed in flight on the HW.
395 * Return the pointer to the scheduler on success, otherwise NULL.
396*/
397struct amd_gpu_scheduler *amd_sched_create(void *device,
398 struct amd_sched_backend_ops *ops,
399 unsigned ring,
400 unsigned granularity,
401 unsigned preemption,
402 unsigned hw_submission)
403{
404 struct amd_gpu_scheduler *sched;
405 char name[20];
406
407 sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
408 if (!sched)
409 return NULL;
410
411 sched->device = device;
412 sched->ops = ops;
413 sched->granularity = granularity;
414 sched->ring_id = ring;
415 sched->preemption = preemption;
416 sched->hw_submission_limit = hw_submission;
417 snprintf(name, sizeof(name), "gpu_sched[%d]", ring);
418 mutex_init(&sched->sched_lock);
419 spin_lock_init(&sched->queue_lock);
420 amd_sched_rq_init(&sched->sched_rq);
421 amd_sched_rq_init(&sched->kernel_rq);
422
423 init_waitqueue_head(&sched->wait_queue);
424 INIT_LIST_HEAD(&sched->active_hw_rq);
425 atomic64_set(&sched->hw_rq_count, 0);
426 /* Each scheduler will run on a separate kernel thread */
427 sched->thread = kthread_create(amd_sched_main, sched, name);
428 if (sched->thread) {
429 wake_up_process(sched->thread);
430 return sched;
431 }
432
433 DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
434 kfree(sched);
435 return NULL;
436}
437
438/**
439 * Destroy a gpu scheduler
440 *
441 * @sched The pointer to the scheduler
442 *
443 * Return 0 on success.
444 */
445int amd_sched_destroy(struct amd_gpu_scheduler *sched)
446{
447 kthread_stop(sched->thread);
448 kfree(sched);
449 return 0;
450}
451
452/**
453 * Get next queued sequence number
454 *
455 * @entity The context entity
456 *
457 * Return the next queued sequence number.
458*/
459uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity)
460{
461 return atomic64_read(&c_entity->last_queued_v_seq) + 1;
462}
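
A minimal usage sketch (not part of the patch) of the API this file adds; my_ops, build_job() and the numeric arguments are hypothetical, only the amd_sched_*/fence_* calls come from the code above:

#include <linux/fence.h>
#include "gpu_scheduler.h"

/* hypothetical driver pieces */
extern struct amd_sched_backend_ops my_ops;
extern void *build_job(void);

static int my_ring_test(void *device, struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched;
	struct amd_sched_fence *fence;
	int r;

	/* one scheduler per ring: ring 0, 1 ms granularity,
	 * no preemption, at most 2 jobs in flight on the HW */
	sched = amd_sched_create(device, &my_ops, 0, 1, 0, 2);
	if (!sched)
		return -ENOMEM;

	/* entity feeding the normal run queue, 16-slot job queue */
	r = amd_sched_entity_init(sched, entity, &sched->sched_rq, 16);
	if (r)
		goto out_destroy;

	/* blocks in the kfifo loop above while the queue is full */
	r = amd_sched_push_job(sched, entity, build_job(), &fence);
	if (r)
		goto out_fini;

	fence_wait(&fence->base, false);	/* completion via process_job */
	fence_put(&fence->base);
out_fini:
	amd_sched_entity_fini(sched, entity);
out_destroy:
	amd_sched_destroy(sched);
	return r;
}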
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
new file mode 100644
index 000000000000..ceb5918bfbeb
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -0,0 +1,162 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _GPU_SCHEDULER_H_
25#define _GPU_SCHEDULER_H_
26
27#include <linux/kfifo.h>
28#include <linux/fence.h>
29
30#define AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
31
32struct amd_gpu_scheduler;
33struct amd_sched_rq;
34
35/**
36 * A scheduler entity is a wrapper around a job queue or a group
37 * of other entities. Entities take turns emitting jobs from their
38 * job queues to the corresponding hardware ring, based on the
39 * scheduling policy.
40*/
41struct amd_sched_entity {
42 struct list_head list;
43 struct amd_sched_rq *belongto_rq;
44 spinlock_t lock;
45 /* the virtual_seq is unique per context per ring */
46 atomic64_t last_queued_v_seq;
47 atomic64_t last_signaled_v_seq;
48 /* the job_queue maintains the jobs submitted by clients */
49 struct kfifo job_queue;
50 spinlock_t queue_lock;
51 struct amd_gpu_scheduler *scheduler;
52 wait_queue_head_t wait_queue;
53 wait_queue_head_t wait_emit;
54 bool is_pending;
55 uint64_t fence_context;
56 char name[20];
57 bool need_wakeup;
58};
59
60/**
61 * Run queue is a set of entities scheduling command submissions for
62 * one specific ring. It implements the scheduling policy that selects
63 * the next entity to emit commands from.
64*/
65struct amd_sched_rq {
66 struct mutex lock;
67 struct list_head entities;
68 struct amd_sched_entity *current_entity;
69};
70
71struct amd_sched_fence {
72 struct fence base;
73 struct fence_cb cb;
74 struct amd_sched_entity *entity;
75 uint64_t v_seq;
76 spinlock_t lock;
77};
78
79struct amd_sched_job {
80 struct list_head list;
81 struct fence_cb cb;
82 struct amd_gpu_scheduler *sched;
83 struct amd_sched_entity *s_entity;
84 void *data;
85 struct amd_sched_fence *s_fence;
86};
87
88extern const struct fence_ops amd_sched_fence_ops;
89static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
90{
91 struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
92
93 if (__f->base.ops == &amd_sched_fence_ops)
94 return __f;
95
96 return NULL;
97}
98
99/**
100 * The backend operations called by the scheduler;
101 * these functions should be implemented on the driver side.
102*/
103struct amd_sched_backend_ops {
104 int (*prepare_job)(struct amd_gpu_scheduler *sched,
105 struct amd_sched_entity *c_entity,
106 struct amd_sched_job *job);
107 struct fence *(*run_job)(struct amd_gpu_scheduler *sched,
108 struct amd_sched_entity *c_entity,
109 struct amd_sched_job *job);
110 void (*process_job)(struct amd_gpu_scheduler *sched,
111 struct amd_sched_job *job);
112};
113
114/**
115 * One scheduler is implemented for each hardware ring
116*/
117struct amd_gpu_scheduler {
118 void *device;
119 struct task_struct *thread;
120 struct amd_sched_rq sched_rq;
121 struct amd_sched_rq kernel_rq;
122 struct list_head active_hw_rq;
123 atomic64_t hw_rq_count;
124 struct amd_sched_backend_ops *ops;
125 uint32_t ring_id;
126 uint32_t granularity; /* in ms unit */
127 uint32_t preemption;
128 wait_queue_head_t wait_queue;
129 struct amd_sched_entity *current_entity;
130 struct mutex sched_lock;
131 spinlock_t queue_lock;
132 uint32_t hw_submission_limit;
133};
134
135struct amd_gpu_scheduler *amd_sched_create(void *device,
136 struct amd_sched_backend_ops *ops,
137 uint32_t ring,
138 uint32_t granularity,
139 uint32_t preemption,
140 uint32_t hw_submission);
141int amd_sched_destroy(struct amd_gpu_scheduler *sched);
142
143int amd_sched_push_job(struct amd_gpu_scheduler *sched,
144 struct amd_sched_entity *c_entity,
145 void *data,
146 struct amd_sched_fence **fence);
147
148int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
149 struct amd_sched_entity *entity,
150 struct amd_sched_rq *rq,
151 uint32_t jobs);
152int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
153 struct amd_sched_entity *entity);
154
155uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity);
156
157struct amd_sched_fence *amd_sched_fence_create(
158 struct amd_sched_entity *s_entity);
159void amd_sched_fence_signal(struct amd_sched_fence *fence);
160
161
162#endif
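
The three hooks in amd_sched_backend_ops map onto a driver roughly as follows (a sketch; my_hw_submit() stands in for the driver's actual ring emission):

static int my_prepare_job(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *c_entity,
			  struct amd_sched_job *job)
{
	/* runs in the scheduler thread before run_job: validate and
	 * pin the buffers referenced by job->data */
	return 0;
}

static struct fence *my_run_job(struct amd_gpu_scheduler *sched,
				struct amd_sched_entity *c_entity,
				struct amd_sched_job *job)
{
	/* emit job->data to the ring and hand back the HW fence; the
	 * scheduler attaches its completion callback to this fence */
	return my_hw_submit(sched->device, job->data);	/* hypothetical */
}

static void my_process_job(struct amd_gpu_scheduler *sched,
			   struct amd_sched_job *job)
{
	/* HW fence signaled: unpin buffers, free job->data, etc. */
}

static struct amd_sched_backend_ops my_ops = {
	.prepare_job	= my_prepare_job,
	.run_job	= my_run_job,
	.process_job	= my_process_job,
};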
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
new file mode 100644
index 000000000000..a4751598c0b4
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -0,0 +1,78 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 *
23 */
24#include <linux/kthread.h>
25#include <linux/wait.h>
26#include <linux/sched.h>
27#include <drm/drmP.h>
28#include "gpu_scheduler.h"
29
30struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity)
31{
32 struct amd_sched_fence *fence = NULL;
33 fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
34 if (fence == NULL)
35 return NULL;
36 fence->v_seq = atomic64_inc_return(&s_entity->last_queued_v_seq);
37 fence->entity = s_entity;
38 spin_lock_init(&fence->lock);
39 fence_init(&fence->base, &amd_sched_fence_ops,
40 &fence->lock,
41 s_entity->fence_context,
42 fence->v_seq);
43 return fence;
44}
45
46void amd_sched_fence_signal(struct amd_sched_fence *fence)
47{
48 int ret = fence_signal(&fence->base);
49 if (!ret)
50 FENCE_TRACE(&fence->base, "signaled from irq context\n");
51 else
52 FENCE_TRACE(&fence->base, "was already signaled\n");
53}
54
55static const char *amd_sched_fence_get_driver_name(struct fence *fence)
56{
57 return "amd_sched";
58}
59
60static const char *amd_sched_fence_get_timeline_name(struct fence *f)
61{
62 struct amd_sched_fence *fence = to_amd_sched_fence(f);
63 return (const char *)fence->entity->name;
64}
65
66static bool amd_sched_fence_enable_signaling(struct fence *f)
67{
68 return true;
69}
70
71const struct fence_ops amd_sched_fence_ops = {
72 .get_driver_name = amd_sched_fence_get_driver_name,
73 .get_timeline_name = amd_sched_fence_get_timeline_name,
74 .enable_signaling = amd_sched_fence_enable_signaling,
75 .signaled = NULL,
76 .wait = fence_default_wait,
77 .release = NULL,
78};
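
to_amd_sched_fence() from the header gives shared code a checked downcast from a plain struct fence; for instance (sketch):

static u64 my_fence_seq(struct fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	/* NULL for fences that do not use amd_sched_fence_ops */
	return s_fence ? s_fence->v_seq : 0;
}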
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 7838e731b0de..7d03c51abcb9 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -22,9 +22,9 @@ static /*const*/ struct fb_ops armada_fb_ops = {
22 .owner = THIS_MODULE, 22 .owner = THIS_MODULE,
23 .fb_check_var = drm_fb_helper_check_var, 23 .fb_check_var = drm_fb_helper_check_var,
24 .fb_set_par = drm_fb_helper_set_par, 24 .fb_set_par = drm_fb_helper_set_par,
25 .fb_fillrect = cfb_fillrect, 25 .fb_fillrect = drm_fb_helper_cfb_fillrect,
26 .fb_copyarea = cfb_copyarea, 26 .fb_copyarea = drm_fb_helper_cfb_copyarea,
27 .fb_imageblit = cfb_imageblit, 27 .fb_imageblit = drm_fb_helper_cfb_imageblit,
28 .fb_pan_display = drm_fb_helper_pan_display, 28 .fb_pan_display = drm_fb_helper_pan_display,
29 .fb_blank = drm_fb_helper_blank, 29 .fb_blank = drm_fb_helper_blank,
30 .fb_setcmap = drm_fb_helper_setcmap, 30 .fb_setcmap = drm_fb_helper_setcmap,
@@ -80,18 +80,12 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
80 if (IS_ERR(dfb)) 80 if (IS_ERR(dfb))
81 return PTR_ERR(dfb); 81 return PTR_ERR(dfb);
82 82
83 info = framebuffer_alloc(0, dev->dev); 83 info = drm_fb_helper_alloc_fbi(fbh);
84 if (!info) { 84 if (IS_ERR(info)) {
85 ret = -ENOMEM; 85 ret = PTR_ERR(info);
86 goto err_fballoc; 86 goto err_fballoc;
87 } 87 }
88 88
89 ret = fb_alloc_cmap(&info->cmap, 256, 0);
90 if (ret) {
91 ret = -ENOMEM;
92 goto err_fbcmap;
93 }
94
95 strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id)); 89 strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
96 info->par = fbh; 90 info->par = fbh;
97 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; 91 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
@@ -101,7 +95,7 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
101 info->screen_size = obj->obj.size; 95 info->screen_size = obj->obj.size;
102 info->screen_base = ptr; 96 info->screen_base = ptr;
103 fbh->fb = &dfb->fb; 97 fbh->fb = &dfb->fb;
104 fbh->fbdev = info; 98
105 drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth); 99 drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
106 drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height); 100 drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
107 101
@@ -111,8 +105,6 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
111 105
112 return 0; 106 return 0;
113 107
114 err_fbcmap:
115 framebuffer_release(info);
116 err_fballoc: 108 err_fballoc:
117 dfb->fb.funcs->destroy(&dfb->fb); 109 dfb->fb.funcs->destroy(&dfb->fb);
118 return ret; 110 return ret;
@@ -171,6 +163,7 @@ int armada_fbdev_init(struct drm_device *dev)
171 163
172 return 0; 164 return 0;
173 err_fb_setup: 165 err_fb_setup:
166 drm_fb_helper_release_fbi(fbh);
174 drm_fb_helper_fini(fbh); 167 drm_fb_helper_fini(fbh);
175 err_fb_helper: 168 err_fb_helper:
176 priv->fbdev = NULL; 169 priv->fbdev = NULL;
@@ -191,14 +184,8 @@ void armada_fbdev_fini(struct drm_device *dev)
191 struct drm_fb_helper *fbh = priv->fbdev; 184 struct drm_fb_helper *fbh = priv->fbdev;
192 185
193 if (fbh) { 186 if (fbh) {
194 struct fb_info *info = fbh->fbdev; 187 drm_fb_helper_unregister_fbi(fbh);
195 188 drm_fb_helper_release_fbi(fbh);
196 if (info) {
197 unregister_framebuffer(info);
198 if (info->cmap.len)
199 fb_dealloc_cmap(&info->cmap);
200 framebuffer_release(info);
201 }
202 189
203 drm_fb_helper_fini(fbh); 190 drm_fb_helper_fini(fbh);
204 191
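
The same conversion repeats in the ast, bochs and cirrus fbdev code below: drm_fb_helper_alloc_fbi() replaces the open-coded framebuffer_alloc() + fb_alloc_cmap() pair (and hooks the fb_info up as helper->fbdev internally, which is why the explicit assignments go away), drm_fb_helper_unregister_fbi()/drm_fb_helper_release_fbi() replace the hand-rolled teardown, and drm_fb_helper_set_suspend() replaces fb_set_suspend(). In outline:

static int my_fbdev_create(struct drm_fb_helper *helper)
{
	struct fb_info *info;

	info = drm_fb_helper_alloc_fbi(helper);	/* alloc + cmap + helper->fbdev */
	if (IS_ERR(info))
		return PTR_ERR(info);

	/* ... fill info->fbops, fix/var, apertures as before ... */
	return 0;
}

static void my_fbdev_destroy(struct drm_fb_helper *helper)
{
	drm_fb_helper_unregister_fbi(helper);	/* was unregister_framebuffer() */
	drm_fb_helper_release_fbi(helper);	/* frees the cmap and fb_info */
	drm_fb_helper_fini(helper);
}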
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index ff68eefae273..f31db28a684b 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -125,7 +125,7 @@ static void ast_fillrect(struct fb_info *info,
125 const struct fb_fillrect *rect) 125 const struct fb_fillrect *rect)
126{ 126{
127 struct ast_fbdev *afbdev = info->par; 127 struct ast_fbdev *afbdev = info->par;
128 sys_fillrect(info, rect); 128 drm_fb_helper_sys_fillrect(info, rect);
129 ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width, 129 ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
130 rect->height); 130 rect->height);
131} 131}
@@ -134,7 +134,7 @@ static void ast_copyarea(struct fb_info *info,
134 const struct fb_copyarea *area) 134 const struct fb_copyarea *area)
135{ 135{
136 struct ast_fbdev *afbdev = info->par; 136 struct ast_fbdev *afbdev = info->par;
137 sys_copyarea(info, area); 137 drm_fb_helper_sys_copyarea(info, area);
138 ast_dirty_update(afbdev, area->dx, area->dy, area->width, 138 ast_dirty_update(afbdev, area->dx, area->dy, area->width,
139 area->height); 139 area->height);
140} 140}
@@ -143,7 +143,7 @@ static void ast_imageblit(struct fb_info *info,
143 const struct fb_image *image) 143 const struct fb_image *image)
144{ 144{
145 struct ast_fbdev *afbdev = info->par; 145 struct ast_fbdev *afbdev = info->par;
146 sys_imageblit(info, image); 146 drm_fb_helper_sys_imageblit(info, image);
147 ast_dirty_update(afbdev, image->dx, image->dy, image->width, 147 ast_dirty_update(afbdev, image->dx, image->dy, image->width,
148 image->height); 148 image->height);
149} 149}
@@ -193,7 +193,6 @@ static int astfb_create(struct drm_fb_helper *helper,
193 struct drm_framebuffer *fb; 193 struct drm_framebuffer *fb;
194 struct fb_info *info; 194 struct fb_info *info;
195 int size, ret; 195 int size, ret;
196 struct device *device = &dev->pdev->dev;
197 void *sysram; 196 void *sysram;
198 struct drm_gem_object *gobj = NULL; 197 struct drm_gem_object *gobj = NULL;
199 struct ast_bo *bo = NULL; 198 struct ast_bo *bo = NULL;
@@ -217,40 +216,28 @@ static int astfb_create(struct drm_fb_helper *helper,
217 if (!sysram) 216 if (!sysram)
218 return -ENOMEM; 217 return -ENOMEM;
219 218
220 info = framebuffer_alloc(0, device); 219 info = drm_fb_helper_alloc_fbi(helper);
221 if (!info) { 220 if (IS_ERR(info)) {
222 ret = -ENOMEM; 221 ret = PTR_ERR(info);
223 goto out; 222 goto err_free_vram;
224 } 223 }
225 info->par = afbdev; 224 info->par = afbdev;
226 225
227 ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj); 226 ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
228 if (ret) 227 if (ret)
229 goto out; 228 goto err_release_fbi;
230 229
231 afbdev->sysram = sysram; 230 afbdev->sysram = sysram;
232 afbdev->size = size; 231 afbdev->size = size;
233 232
234 fb = &afbdev->afb.base; 233 fb = &afbdev->afb.base;
235 afbdev->helper.fb = fb; 234 afbdev->helper.fb = fb;
236 afbdev->helper.fbdev = info;
237 235
238 strcpy(info->fix.id, "astdrmfb"); 236 strcpy(info->fix.id, "astdrmfb");
239 237
240 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; 238 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
241 info->fbops = &astfb_ops; 239 info->fbops = &astfb_ops;
242 240
243 ret = fb_alloc_cmap(&info->cmap, 256, 0);
244 if (ret) {
245 ret = -ENOMEM;
246 goto out;
247 }
248
249 info->apertures = alloc_apertures(1);
250 if (!info->apertures) {
251 ret = -ENOMEM;
252 goto out;
253 }
254 info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0); 241 info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
255 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); 242 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
256 243
@@ -266,7 +253,11 @@ static int astfb_create(struct drm_fb_helper *helper,
266 fb->width, fb->height); 253 fb->width, fb->height);
267 254
268 return 0; 255 return 0;
269out: 256
257err_release_fbi:
258 drm_fb_helper_release_fbi(helper);
259err_free_vram:
260 vfree(afbdev->sysram);
270 return ret; 261 return ret;
271} 262}
272 263
@@ -297,15 +288,10 @@ static const struct drm_fb_helper_funcs ast_fb_helper_funcs = {
297static void ast_fbdev_destroy(struct drm_device *dev, 288static void ast_fbdev_destroy(struct drm_device *dev,
298 struct ast_fbdev *afbdev) 289 struct ast_fbdev *afbdev)
299{ 290{
300 struct fb_info *info;
301 struct ast_framebuffer *afb = &afbdev->afb; 291 struct ast_framebuffer *afb = &afbdev->afb;
302 if (afbdev->helper.fbdev) { 292
303 info = afbdev->helper.fbdev; 293 drm_fb_helper_unregister_fbi(&afbdev->helper);
304 unregister_framebuffer(info); 294 drm_fb_helper_release_fbi(&afbdev->helper);
305 if (info->cmap.len)
306 fb_dealloc_cmap(&info->cmap);
307 framebuffer_release(info);
308 }
309 295
310 if (afb->obj) { 296 if (afb->obj) {
311 drm_gem_object_unreference_unlocked(afb->obj); 297 drm_gem_object_unreference_unlocked(afb->obj);
@@ -377,5 +363,5 @@ void ast_fbdev_set_suspend(struct drm_device *dev, int state)
377 if (!ast->fbdev) 363 if (!ast->fbdev)
378 return; 364 return;
379 365
380 fb_set_suspend(ast->fbdev->helper.fbdev, state); 366 drm_fb_helper_set_suspend(&ast->fbdev->helper, state);
381} 367}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 035dacc93382..838217f8ce7d 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -571,24 +571,18 @@ ast_dumb_mmap_offset(struct drm_file *file,
571 uint64_t *offset) 571 uint64_t *offset)
572{ 572{
573 struct drm_gem_object *obj; 573 struct drm_gem_object *obj;
574 int ret;
575 struct ast_bo *bo; 574 struct ast_bo *bo;
576 575
577 mutex_lock(&dev->struct_mutex);
578 obj = drm_gem_object_lookup(dev, file, handle); 576 obj = drm_gem_object_lookup(dev, file, handle);
579 if (obj == NULL) { 577 if (obj == NULL)
580 ret = -ENOENT; 578 return -ENOENT;
581 goto out_unlock;
582 }
583 579
584 bo = gem_to_ast_bo(obj); 580 bo = gem_to_ast_bo(obj);
585 *offset = ast_bo_mmap_offset(bo); 581 *offset = ast_bo_mmap_offset(bo);
586 582
587 drm_gem_object_unreference(obj); 583 drm_gem_object_unreference_unlocked(obj);
588 ret = 0; 584
589out_unlock: 585 return 0;
590 mutex_unlock(&dev->struct_mutex);
591 return ret;
592 586
593} 587}
594 588
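
The same simplification shows up again in bochs and cirrus below: drm_gem_object_lookup() does not need dev->struct_mutex, so the lookup/unreference pair can run lockless with drm_gem_object_unreference_unlocked(). In outline (my_bo_mmap_offset() stands in for each driver's helper):

static int my_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
			       uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL)
		return -ENOENT;

	*offset = my_bo_mmap_offset(obj);	/* driver-specific */
	drm_gem_object_unreference_unlocked(obj);
	return 0;
}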
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 6fad1f9648f3..8bc62ec407f9 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -29,6 +29,115 @@
29 29
30#define ATMEL_HLCDC_LAYER_IRQS_OFFSET 8 30#define ATMEL_HLCDC_LAYER_IRQS_OFFSET 8
31 31
32static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9n12_layers[] = {
33 {
34 .name = "base",
35 .formats = &atmel_hlcdc_plane_rgb_formats,
36 .regs_offset = 0x40,
37 .id = 0,
38 .type = ATMEL_HLCDC_BASE_LAYER,
39 .nconfigs = 5,
40 .layout = {
41 .xstride = { 2 },
42 .default_color = 3,
43 .general_config = 4,
44 },
45 },
46};
47
48static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9n12 = {
49 .min_width = 0,
50 .min_height = 0,
51 .max_width = 1280,
52 .max_height = 860,
53 .nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9n12_layers),
54 .layers = atmel_hlcdc_at91sam9n12_layers,
55};
56
57static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = {
58 {
59 .name = "base",
60 .formats = &atmel_hlcdc_plane_rgb_formats,
61 .regs_offset = 0x40,
62 .id = 0,
63 .type = ATMEL_HLCDC_BASE_LAYER,
64 .nconfigs = 5,
65 .layout = {
66 .xstride = { 2 },
67 .default_color = 3,
68 .general_config = 4,
69 .disc_pos = 5,
70 .disc_size = 6,
71 },
72 },
73 {
74 .name = "overlay1",
75 .formats = &atmel_hlcdc_plane_rgb_formats,
76 .regs_offset = 0x100,
77 .id = 1,
78 .type = ATMEL_HLCDC_OVERLAY_LAYER,
79 .nconfigs = 10,
80 .layout = {
81 .pos = 2,
82 .size = 3,
83 .xstride = { 4 },
84 .pstride = { 5 },
85 .default_color = 6,
86 .chroma_key = 7,
87 .chroma_key_mask = 8,
88 .general_config = 9,
89 },
90 },
91 {
92 .name = "high-end-overlay",
93 .formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
94 .regs_offset = 0x280,
95 .id = 2,
96 .type = ATMEL_HLCDC_OVERLAY_LAYER,
97 .nconfigs = 17,
98 .layout = {
99 .pos = 2,
100 .size = 3,
101 .memsize = 4,
102 .xstride = { 5, 7 },
103 .pstride = { 6, 8 },
104 .default_color = 9,
105 .chroma_key = 10,
106 .chroma_key_mask = 11,
107 .general_config = 12,
108 .csc = 14,
109 },
110 },
111 {
112 .name = "cursor",
113 .formats = &atmel_hlcdc_plane_rgb_formats,
114 .regs_offset = 0x340,
115 .id = 3,
116 .type = ATMEL_HLCDC_CURSOR_LAYER,
117 .nconfigs = 10,
118 .max_width = 128,
119 .max_height = 128,
120 .layout = {
121 .pos = 2,
122 .size = 3,
123 .xstride = { 4 },
124 .default_color = 6,
125 .chroma_key = 7,
126 .chroma_key_mask = 8,
127 .general_config = 9,
128 },
129 },
130};
131
132static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9x5 = {
133 .min_width = 0,
134 .min_height = 0,
135 .max_width = 800,
136 .max_height = 600,
137 .nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9x5_layers),
138 .layers = atmel_hlcdc_at91sam9x5_layers,
139};
140
32static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = { 141static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
33 { 142 {
34 .name = "base", 143 .name = "base",
@@ -132,11 +241,105 @@ static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d3 = {
132 .layers = atmel_hlcdc_sama5d3_layers, 241 .layers = atmel_hlcdc_sama5d3_layers,
133}; 242};
134 243
244static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = {
245 {
246 .name = "base",
247 .formats = &atmel_hlcdc_plane_rgb_formats,
248 .regs_offset = 0x40,
249 .id = 0,
250 .type = ATMEL_HLCDC_BASE_LAYER,
251 .nconfigs = 7,
252 .layout = {
253 .xstride = { 2 },
254 .default_color = 3,
255 .general_config = 4,
256 .disc_pos = 5,
257 .disc_size = 6,
258 },
259 },
260 {
261 .name = "overlay1",
262 .formats = &atmel_hlcdc_plane_rgb_formats,
263 .regs_offset = 0x140,
264 .id = 1,
265 .type = ATMEL_HLCDC_OVERLAY_LAYER,
266 .nconfigs = 10,
267 .layout = {
268 .pos = 2,
269 .size = 3,
270 .xstride = { 4 },
271 .pstride = { 5 },
272 .default_color = 6,
273 .chroma_key = 7,
274 .chroma_key_mask = 8,
275 .general_config = 9,
276 },
277 },
278 {
279 .name = "overlay2",
280 .formats = &atmel_hlcdc_plane_rgb_formats,
281 .regs_offset = 0x240,
282 .id = 2,
283 .type = ATMEL_HLCDC_OVERLAY_LAYER,
284 .nconfigs = 10,
285 .layout = {
286 .pos = 2,
287 .size = 3,
288 .xstride = { 4 },
289 .pstride = { 5 },
290 .default_color = 6,
291 .chroma_key = 7,
292 .chroma_key_mask = 8,
293 .general_config = 9,
294 },
295 },
296 {
297 .name = "high-end-overlay",
298 .formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
299 .regs_offset = 0x340,
300 .id = 3,
301 .type = ATMEL_HLCDC_OVERLAY_LAYER,
302 .nconfigs = 42,
303 .layout = {
304 .pos = 2,
305 .size = 3,
306 .memsize = 4,
307 .xstride = { 5, 7 },
308 .pstride = { 6, 8 },
309 .default_color = 9,
310 .chroma_key = 10,
311 .chroma_key_mask = 11,
312 .general_config = 12,
313 .csc = 14,
314 },
315 },
316};
317
318static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d4 = {
319 .min_width = 0,
320 .min_height = 0,
321 .max_width = 2048,
322 .max_height = 2048,
323 .nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d4_layers),
324 .layers = atmel_hlcdc_sama5d4_layers,
325};
135static const struct of_device_id atmel_hlcdc_of_match[] = { 326static const struct of_device_id atmel_hlcdc_of_match[] = {
136 { 327 {
328 .compatible = "atmel,at91sam9n12-hlcdc",
329 .data = &atmel_hlcdc_dc_at91sam9n12,
330 },
331 {
332 .compatible = "atmel,at91sam9x5-hlcdc",
333 .data = &atmel_hlcdc_dc_at91sam9x5,
334 },
335 {
137 .compatible = "atmel,sama5d3-hlcdc", 336 .compatible = "atmel,sama5d3-hlcdc",
138 .data = &atmel_hlcdc_dc_sama5d3, 337 .data = &atmel_hlcdc_dc_sama5d3,
139 }, 338 },
339 {
340 .compatible = "atmel,sama5d4-hlcdc",
341 .data = &atmel_hlcdc_dc_sama5d4,
342 },
140 { /* sentinel */ }, 343 { /* sentinel */ },
141}; 344};
142 345
@@ -485,7 +688,9 @@ static const struct file_operations fops = {
485}; 688};
486 689
487static struct drm_driver atmel_hlcdc_dc_driver = { 690static struct drm_driver atmel_hlcdc_dc_driver = {
488 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET, 691 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
692 DRIVER_MODESET | DRIVER_PRIME |
693 DRIVER_ATOMIC,
489 .preclose = atmel_hlcdc_dc_preclose, 694 .preclose = atmel_hlcdc_dc_preclose,
490 .lastclose = atmel_hlcdc_dc_lastclose, 695 .lastclose = atmel_hlcdc_dc_lastclose,
491 .irq_handler = atmel_hlcdc_dc_irq_handler, 696 .irq_handler = atmel_hlcdc_dc_irq_handler,
@@ -497,6 +702,15 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
497 .disable_vblank = atmel_hlcdc_dc_disable_vblank, 702 .disable_vblank = atmel_hlcdc_dc_disable_vblank,
498 .gem_free_object = drm_gem_cma_free_object, 703 .gem_free_object = drm_gem_cma_free_object,
499 .gem_vm_ops = &drm_gem_cma_vm_ops, 704 .gem_vm_ops = &drm_gem_cma_vm_ops,
705 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
706 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
707 .gem_prime_import = drm_gem_prime_import,
708 .gem_prime_export = drm_gem_prime_export,
709 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
710 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
711 .gem_prime_vmap = drm_gem_cma_prime_vmap,
712 .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
713 .gem_prime_mmap = drm_gem_cma_prime_mmap,
500 .dumb_create = drm_gem_cma_dumb_create, 714 .dumb_create = drm_gem_cma_dumb_create,
501 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 715 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
502 .dumb_destroy = drm_gem_dumb_destroy, 716 .dumb_destroy = drm_gem_dumb_destroy,
@@ -559,7 +773,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
559 return 0; 773 return 0;
560} 774}
561 775
562#ifdef CONFIG_PM 776#ifdef CONFIG_PM_SLEEP
563static int atmel_hlcdc_dc_drm_suspend(struct device *dev) 777static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
564{ 778{
565 struct drm_device *drm_dev = dev_get_drvdata(dev); 779 struct drm_device *drm_dev = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 9c4513005310..067e4c144bd6 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -126,12 +126,16 @@ atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder,
126 126
127 if (info->num_bus_formats) { 127 if (info->num_bus_formats) {
128 switch (info->bus_formats[0]) { 128 switch (info->bus_formats[0]) {
129 case MEDIA_BUS_FMT_RGB565_1X16:
130 cfg |= ATMEL_HLCDC_CONNECTOR_RGB565 << 8;
131 break;
129 case MEDIA_BUS_FMT_RGB666_1X18: 132 case MEDIA_BUS_FMT_RGB666_1X18:
130 cfg |= ATMEL_HLCDC_CONNECTOR_RGB666 << 8; 133 cfg |= ATMEL_HLCDC_CONNECTOR_RGB666 << 8;
131 break; 134 break;
132 case MEDIA_BUS_FMT_RGB888_1X24: 135 case MEDIA_BUS_FMT_RGB888_1X24:
133 cfg |= ATMEL_HLCDC_CONNECTOR_RGB888 << 8; 136 cfg |= ATMEL_HLCDC_CONNECTOR_RGB888 << 8;
134 break; 137 break;
138 case MEDIA_BUS_FMT_RGB444_1X12:
135 default: 139 default:
136 break; 140 break;
137 } 141 }
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 98837bde2d25..7f1a3604b19f 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -109,7 +109,7 @@ static int bochs_pm_suspend(struct device *dev)
109 109
110 if (bochs->fb.initialized) { 110 if (bochs->fb.initialized) {
111 console_lock(); 111 console_lock();
112 fb_set_suspend(bochs->fb.helper.fbdev, 1); 112 drm_fb_helper_set_suspend(&bochs->fb.helper, 1);
113 console_unlock(); 113 console_unlock();
114 } 114 }
115 115
@@ -126,7 +126,7 @@ static int bochs_pm_resume(struct device *dev)
126 126
127 if (bochs->fb.initialized) { 127 if (bochs->fb.initialized) {
128 console_lock(); 128 console_lock();
129 fb_set_suspend(bochs->fb.helper.fbdev, 0); 129 drm_fb_helper_set_suspend(&bochs->fb.helper, 0);
130 console_unlock(); 130 console_unlock();
131 } 131 }
132 132
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 976d9798dc99..09a0637aab3e 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -24,9 +24,9 @@ static struct fb_ops bochsfb_ops = {
24 .owner = THIS_MODULE, 24 .owner = THIS_MODULE,
25 .fb_check_var = drm_fb_helper_check_var, 25 .fb_check_var = drm_fb_helper_check_var,
26 .fb_set_par = drm_fb_helper_set_par, 26 .fb_set_par = drm_fb_helper_set_par,
27 .fb_fillrect = sys_fillrect, 27 .fb_fillrect = drm_fb_helper_sys_fillrect,
28 .fb_copyarea = sys_copyarea, 28 .fb_copyarea = drm_fb_helper_sys_copyarea,
29 .fb_imageblit = sys_imageblit, 29 .fb_imageblit = drm_fb_helper_sys_imageblit,
30 .fb_pan_display = drm_fb_helper_pan_display, 30 .fb_pan_display = drm_fb_helper_pan_display,
31 .fb_blank = drm_fb_helper_blank, 31 .fb_blank = drm_fb_helper_blank,
32 .fb_setcmap = drm_fb_helper_setcmap, 32 .fb_setcmap = drm_fb_helper_setcmap,
@@ -56,11 +56,9 @@ static int bochsfb_create(struct drm_fb_helper *helper,
56{ 56{
57 struct bochs_device *bochs = 57 struct bochs_device *bochs =
58 container_of(helper, struct bochs_device, fb.helper); 58 container_of(helper, struct bochs_device, fb.helper);
59 struct drm_device *dev = bochs->dev;
60 struct fb_info *info; 59 struct fb_info *info;
61 struct drm_framebuffer *fb; 60 struct drm_framebuffer *fb;
62 struct drm_mode_fb_cmd2 mode_cmd; 61 struct drm_mode_fb_cmd2 mode_cmd;
63 struct device *device = &dev->pdev->dev;
64 struct drm_gem_object *gobj = NULL; 62 struct drm_gem_object *gobj = NULL;
65 struct bochs_bo *bo = NULL; 63 struct bochs_bo *bo = NULL;
66 int size, ret; 64 int size, ret;
@@ -106,22 +104,23 @@ static int bochsfb_create(struct drm_fb_helper *helper,
106 ttm_bo_unreserve(&bo->bo); 104 ttm_bo_unreserve(&bo->bo);
107 105
108 /* init fb device */ 106 /* init fb device */
109 info = framebuffer_alloc(0, device); 107 info = drm_fb_helper_alloc_fbi(helper);
110 if (info == NULL) 108 if (IS_ERR(info))
111 return -ENOMEM; 109 return PTR_ERR(info);
112 110
113 info->par = &bochs->fb.helper; 111 info->par = &bochs->fb.helper;
114 112
115 ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj); 113 ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
116 if (ret) 114 if (ret) {
115 drm_fb_helper_release_fbi(helper);
117 return ret; 116 return ret;
117 }
118 118
119 bochs->fb.size = size; 119 bochs->fb.size = size;
120 120
121 /* setup helper */ 121 /* setup helper */
122 fb = &bochs->fb.gfb.base; 122 fb = &bochs->fb.gfb.base;
123 bochs->fb.helper.fb = fb; 123 bochs->fb.helper.fb = fb;
124 bochs->fb.helper.fbdev = info;
125 124
126 strcpy(info->fix.id, "bochsdrmfb"); 125 strcpy(info->fix.id, "bochsdrmfb");
127 126
@@ -139,30 +138,17 @@ static int bochsfb_create(struct drm_fb_helper *helper,
139 info->fix.smem_start = 0; 138 info->fix.smem_start = 0;
140 info->fix.smem_len = size; 139 info->fix.smem_len = size;
141 140
142 ret = fb_alloc_cmap(&info->cmap, 256, 0);
143 if (ret) {
144 DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
145 return -ENOMEM;
146 }
147
148 return 0; 141 return 0;
149} 142}
150 143
151static int bochs_fbdev_destroy(struct bochs_device *bochs) 144static int bochs_fbdev_destroy(struct bochs_device *bochs)
152{ 145{
153 struct bochs_framebuffer *gfb = &bochs->fb.gfb; 146 struct bochs_framebuffer *gfb = &bochs->fb.gfb;
154 struct fb_info *info;
155 147
156 DRM_DEBUG_DRIVER("\n"); 148 DRM_DEBUG_DRIVER("\n");
157 149
158 if (bochs->fb.helper.fbdev) { 150 drm_fb_helper_unregister_fbi(&bochs->fb.helper);
159 info = bochs->fb.helper.fbdev; 151 drm_fb_helper_release_fbi(&bochs->fb.helper);
160
161 unregister_framebuffer(info);
162 if (info->cmap.len)
163 fb_dealloc_cmap(&info->cmap);
164 framebuffer_release(info);
165 }
166 152
167 if (gfb->obj) { 153 if (gfb->obj) {
168 drm_gem_object_unreference_unlocked(gfb->obj); 154 drm_gem_object_unreference_unlocked(gfb->obj);
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 66286ff518d4..f69e6bf9bb0e 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -454,25 +454,17 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
454 uint32_t handle, uint64_t *offset) 454 uint32_t handle, uint64_t *offset)
455{ 455{
456 struct drm_gem_object *obj; 456 struct drm_gem_object *obj;
457 int ret;
458 struct bochs_bo *bo; 457 struct bochs_bo *bo;
459 458
460 mutex_lock(&dev->struct_mutex);
461 obj = drm_gem_object_lookup(dev, file, handle); 459 obj = drm_gem_object_lookup(dev, file, handle);
462 if (obj == NULL) { 460 if (obj == NULL)
463 ret = -ENOENT; 461 return -ENOENT;
464 goto out_unlock;
465 }
466 462
467 bo = gem_to_bochs_bo(obj); 463 bo = gem_to_bochs_bo(obj);
468 *offset = bochs_bo_mmap_offset(bo); 464 *offset = bochs_bo_mmap_offset(bo);
469 465
470 drm_gem_object_unreference(obj); 466 drm_gem_object_unreference_unlocked(obj);
471 ret = 0; 467 return 0;
472out_unlock:
473 mutex_unlock(&dev->struct_mutex);
474 return ret;
475
476} 468}
477 469
478/* ---------------------------------------------------------------------- */ 470/* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index acef3223772c..2de52a53a803 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -1,24 +1,32 @@
1config DRM_BRIDGE
2 def_bool y
3 depends on DRM
4 help
5 Bridge registration and lookup framework.
6
7menu "Display Interface Bridges"
8 depends on DRM && DRM_BRIDGE
9
1config DRM_DW_HDMI 10config DRM_DW_HDMI
2 tristate 11 tristate
3 depends on DRM
4 select DRM_KMS_HELPER 12 select DRM_KMS_HELPER
5 13
6config DRM_PTN3460 14config DRM_NXP_PTN3460
7 tristate "PTN3460 DP/LVDS bridge" 15 tristate "NXP PTN3460 DP/LVDS bridge"
8 depends on DRM
9 depends on OF 16 depends on OF
10 select DRM_KMS_HELPER 17 select DRM_KMS_HELPER
11 select DRM_PANEL 18 select DRM_PANEL
12 ---help--- 19 ---help---
13 ptn3460 eDP-LVDS bridge chip driver. 20 NXP PTN3460 eDP-LVDS bridge chip driver.
14 21
15config DRM_PS8622 22config DRM_PARADE_PS8622
16 tristate "Parade eDP/LVDS bridge" 23 tristate "Parade eDP/LVDS bridge"
17 depends on DRM
18 depends on OF 24 depends on OF
19 select DRM_PANEL 25 select DRM_PANEL
20 select DRM_KMS_HELPER 26 select DRM_KMS_HELPER
21 select BACKLIGHT_LCD_SUPPORT 27 select BACKLIGHT_LCD_SUPPORT
22 select BACKLIGHT_CLASS_DEVICE 28 select BACKLIGHT_CLASS_DEVICE
23 ---help--- 29 ---help---
24 parade eDP-LVDS bridge chip driver. 30 Parade eDP-LVDS bridge chip driver.
31
32endmenu
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 8dfebd984370..e2eef1c2f4c3 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,5 @@
1ccflags-y := -Iinclude/drm 1ccflags-y := -Iinclude/drm
2 2
3obj-$(CONFIG_DRM_PS8622) += ps8622.o
4obj-$(CONFIG_DRM_PTN3460) += ptn3460.o
5obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o 3obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o
4obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
5obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 1b1bf2384815..1b1bf2384815 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
diff --git a/drivers/gpu/drm/bridge/ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 1a6607beb29f..1a6607beb29f 100644
--- a/drivers/gpu/drm/bridge/ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index b9140032962d..b1619e29a564 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -92,7 +92,7 @@ static int cirrus_pm_suspend(struct device *dev)
92 92
93 if (cdev->mode_info.gfbdev) { 93 if (cdev->mode_info.gfbdev) {
94 console_lock(); 94 console_lock();
95 fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1); 95 drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 1);
96 console_unlock(); 96 console_unlock();
97 } 97 }
98 98
@@ -109,7 +109,7 @@ static int cirrus_pm_resume(struct device *dev)
109 109
110 if (cdev->mode_info.gfbdev) { 110 if (cdev->mode_info.gfbdev) {
111 console_lock(); 111 console_lock();
112 fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0); 112 drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 0);
113 console_unlock(); 113 console_unlock();
114 } 114 }
115 115
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 13ddf1c4bb8e..589103bcc06c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -98,7 +98,7 @@ static void cirrus_fillrect(struct fb_info *info,
98 const struct fb_fillrect *rect) 98 const struct fb_fillrect *rect)
99{ 99{
100 struct cirrus_fbdev *afbdev = info->par; 100 struct cirrus_fbdev *afbdev = info->par;
101 sys_fillrect(info, rect); 101 drm_fb_helper_sys_fillrect(info, rect);
102 cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width, 102 cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
103 rect->height); 103 rect->height);
104} 104}
@@ -107,7 +107,7 @@ static void cirrus_copyarea(struct fb_info *info,
107 const struct fb_copyarea *area) 107 const struct fb_copyarea *area)
108{ 108{
109 struct cirrus_fbdev *afbdev = info->par; 109 struct cirrus_fbdev *afbdev = info->par;
110 sys_copyarea(info, area); 110 drm_fb_helper_sys_copyarea(info, area);
111 cirrus_dirty_update(afbdev, area->dx, area->dy, area->width, 111 cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
112 area->height); 112 area->height);
113} 113}
@@ -116,7 +116,7 @@ static void cirrus_imageblit(struct fb_info *info,
116 const struct fb_image *image) 116 const struct fb_image *image)
117{ 117{
118 struct cirrus_fbdev *afbdev = info->par; 118 struct cirrus_fbdev *afbdev = info->par;
119 sys_imageblit(info, image); 119 drm_fb_helper_sys_imageblit(info, image);
120 cirrus_dirty_update(afbdev, image->dx, image->dy, image->width, 120 cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
121 image->height); 121 image->height);
122} 122}
@@ -165,12 +165,10 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
165{ 165{
166 struct cirrus_fbdev *gfbdev = 166 struct cirrus_fbdev *gfbdev =
167 container_of(helper, struct cirrus_fbdev, helper); 167 container_of(helper, struct cirrus_fbdev, helper);
168 struct drm_device *dev = gfbdev->helper.dev;
169 struct cirrus_device *cdev = gfbdev->helper.dev->dev_private; 168 struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
170 struct fb_info *info; 169 struct fb_info *info;
171 struct drm_framebuffer *fb; 170 struct drm_framebuffer *fb;
172 struct drm_mode_fb_cmd2 mode_cmd; 171 struct drm_mode_fb_cmd2 mode_cmd;
173 struct device *device = &dev->pdev->dev;
174 void *sysram; 172 void *sysram;
175 struct drm_gem_object *gobj = NULL; 173 struct drm_gem_object *gobj = NULL;
176 struct cirrus_bo *bo = NULL; 174 struct cirrus_bo *bo = NULL;
@@ -195,9 +193,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
195 if (!sysram) 193 if (!sysram)
196 return -ENOMEM; 194 return -ENOMEM;
197 195
198 info = framebuffer_alloc(0, device); 196 info = drm_fb_helper_alloc_fbi(helper);
199 if (info == NULL) 197 if (IS_ERR(info))
200 return -ENOMEM; 198 return PTR_ERR(info);
201 199
202 info->par = gfbdev; 200 info->par = gfbdev;
203 201
@@ -216,11 +214,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
216 214
217 /* setup helper */ 215 /* setup helper */
218 gfbdev->helper.fb = fb; 216 gfbdev->helper.fb = fb;
219 gfbdev->helper.fbdev = info;
220 217
221 strcpy(info->fix.id, "cirrusdrmfb"); 218 strcpy(info->fix.id, "cirrusdrmfb");
222 219
223
224 info->flags = FBINFO_DEFAULT; 220 info->flags = FBINFO_DEFAULT;
225 info->fbops = &cirrusfb_ops; 221 info->fbops = &cirrusfb_ops;
226 222
@@ -229,11 +225,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
229 sizes->fb_height); 225 sizes->fb_height);
230 226
231 /* setup aperture base/size for vesafb takeover */ 227 /* setup aperture base/size for vesafb takeover */
232 info->apertures = alloc_apertures(1);
233 if (!info->apertures) {
234 ret = -ENOMEM;
235 goto out_iounmap;
236 }
237 info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base; 228 info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
238 info->apertures->ranges[0].size = cdev->mc.vram_size; 229 info->apertures->ranges[0].size = cdev->mc.vram_size;
239 230
@@ -246,13 +237,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
246 info->fix.mmio_start = 0; 237 info->fix.mmio_start = 0;
247 info->fix.mmio_len = 0; 238 info->fix.mmio_len = 0;
248 239
249 ret = fb_alloc_cmap(&info->cmap, 256, 0);
250 if (ret) {
251 DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
252 ret = -ENOMEM;
253 goto out_iounmap;
254 }
255
256 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); 240 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
257 DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start); 241 DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
258 DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len); 242 DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
@@ -260,24 +244,15 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
260 DRM_INFO(" pitch is %d\n", fb->pitches[0]); 244 DRM_INFO(" pitch is %d\n", fb->pitches[0]);
261 245
262 return 0; 246 return 0;
263out_iounmap:
264 return ret;
265} 247}
266 248
267static int cirrus_fbdev_destroy(struct drm_device *dev, 249static int cirrus_fbdev_destroy(struct drm_device *dev,
268 struct cirrus_fbdev *gfbdev) 250 struct cirrus_fbdev *gfbdev)
269{ 251{
270 struct fb_info *info;
271 struct cirrus_framebuffer *gfb = &gfbdev->gfb; 252 struct cirrus_framebuffer *gfb = &gfbdev->gfb;
272 253
273 if (gfbdev->helper.fbdev) { 254 drm_fb_helper_unregister_fbi(&gfbdev->helper);
274 info = gfbdev->helper.fbdev; 255 drm_fb_helper_release_fbi(&gfbdev->helper);
275
276 unregister_framebuffer(info);
277 if (info->cmap.len)
278 fb_dealloc_cmap(&info->cmap);
279 framebuffer_release(info);
280 }
281 256
282 if (gfb->obj) { 257 if (gfb->obj) {
283 drm_gem_object_unreference_unlocked(gfb->obj); 258 drm_gem_object_unreference_unlocked(gfb->obj);
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index e4b976658087..055fd86ba717 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -293,25 +293,18 @@ cirrus_dumb_mmap_offset(struct drm_file *file,
293 uint64_t *offset) 293 uint64_t *offset)
294{ 294{
295 struct drm_gem_object *obj; 295 struct drm_gem_object *obj;
296 int ret;
297 struct cirrus_bo *bo; 296 struct cirrus_bo *bo;
298 297
299 mutex_lock(&dev->struct_mutex);
300 obj = drm_gem_object_lookup(dev, file, handle); 298 obj = drm_gem_object_lookup(dev, file, handle);
301 if (obj == NULL) { 299 if (obj == NULL)
302 ret = -ENOENT; 300 return -ENOENT;
303 goto out_unlock;
304 }
305 301
306 bo = gem_to_cirrus_bo(obj); 302 bo = gem_to_cirrus_bo(obj);
307 *offset = cirrus_bo_mmap_offset(bo); 303 *offset = cirrus_bo_mmap_offset(bo);
308 304
309 drm_gem_object_unreference(obj); 305 drm_gem_object_unreference_unlocked(obj);
310 ret = 0;
311out_unlock:
312 mutex_unlock(&dev->struct_mutex);
313 return ret;
314 306
307 return 0;
315} 308}
316 309
317bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height, 310bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3efd91c0c6cb..1066e4b658cf 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -153,9 +153,15 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
153 if (!connector) 153 if (!connector)
154 continue; 154 continue;
155 155
156 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); 156 /*
157 157 * FIXME: Async commits can race with connector unplugging and
158 connector->funcs->atomic_destroy_state(connector, 158 * there's currently nothing that prevents cleaning up state for
159 * deleted connectors. As long as the callback doesn't look at
160 * the connector we'll be fine though, so make sure that's the
161 * case by setting all connector pointers to NULL.
162 */
163 state->connector_states[i]->connector = NULL;
164 connector->funcs->atomic_destroy_state(NULL,
159 state->connector_states[i]); 165 state->connector_states[i]);
160 state->connectors[i] = NULL; 166 state->connectors[i] = NULL;
161 state->connector_states[i] = NULL; 167 state->connector_states[i] = NULL;
@@ -1224,6 +1230,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
1224 } 1230 }
1225 } 1231 }
1226 1232
1233 if (ret == 0)
1234 ww_acquire_done(&state->acquire_ctx->ww_ctx);
1235
1227 return ret; 1236 return ret;
1228} 1237}
1229EXPORT_SYMBOL(drm_atomic_check_only); 1238EXPORT_SYMBOL(drm_atomic_check_only);
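
ww_acquire_done() marks the point after which no further ww-mutexes will be acquired in this context, so calling it once the check phase has succeeded both documents that invariant and lets lockdep enforce it. The generic ww-mutex pattern, in outline:

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(my_ww_class);

static void my_transaction(struct ww_mutex *lock)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &my_ww_class);
	/* ... lock every needed mutex, backing off and retrying
	 * on -EDEADLK ... */
	ww_mutex_lock(lock, &ctx);
	ww_acquire_done(&ctx);	/* promise: no more locks in this ctx */

	/* ... modify the protected state ... */

	ww_mutex_unlock(lock);
	ww_acquire_fini(&ctx);
}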
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index cf27b6b605d8..d432348837a5 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -307,7 +307,7 @@ mode_fixup(struct drm_atomic_state *state)
307 encoder->base.id, encoder->name); 307 encoder->base.id, encoder->name);
308 return ret; 308 return ret;
309 } 309 }
310 } else { 310 } else if (funcs->mode_fixup) {
311 ret = funcs->mode_fixup(encoder, &crtc_state->mode, 311 ret = funcs->mode_fixup(encoder, &crtc_state->mode,
312 &crtc_state->adjusted_mode); 312 &crtc_state->adjusted_mode);
313 if (!ret) { 313 if (!ret) {
@@ -966,7 +966,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
966 continue; 966 continue;
967 967
968 old_crtc_state->enable = true; 968 old_crtc_state->enable = true;
969 old_crtc_state->last_vblank_count = drm_vblank_count(dev, i); 969 old_crtc_state->last_vblank_count = drm_crtc_vblank_count(crtc);
970 } 970 }
971 971
972 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { 972 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
@@ -975,7 +975,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
975 975
976 ret = wait_event_timeout(dev->vblank[i].queue, 976 ret = wait_event_timeout(dev->vblank[i].queue,
977 old_crtc_state->last_vblank_count != 977 old_crtc_state->last_vblank_count !=
978 drm_vblank_count(dev, i), 978 drm_crtc_vblank_count(crtc),
979 msecs_to_jiffies(50)); 979 msecs_to_jiffies(50));
980 980
981 drm_crtc_vblank_put(crtc); 981 drm_crtc_vblank_put(crtc);
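drm_crtc_vblank_count() is the per-CRTC replacement for indexing drm_vblank_count() by pipe. The wait loop above boils down to: hold a vblank reference, record the count, sleep until it advances. Condensed into a sketch (drm_wait_one_vblank() already implements essentially this):

static void foo_wait_one_vblank(struct drm_device *dev, struct drm_crtc *crtc)
{
	u32 count;
	int ret;

	if (drm_crtc_vblank_get(crtc))	/* enables the interrupt if needed */
		return;

	count = drm_crtc_vblank_count(crtc);

	/* Wait up to 50 ms for the cooked counter to advance. */
	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
				 count != drm_crtc_vblank_count(crtc),
				 msecs_to_jiffies(50));
	if (ret == 0)
		DRM_DEBUG_KMS("vblank wait timed out on crtc %u\n",
			      drm_crtc_index(crtc));

	drm_crtc_vblank_put(crtc);
}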
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ca077657604e..33d877c65ced 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1151,7 +1151,7 @@ EXPORT_SYMBOL(drm_encoder_cleanup);
1151int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, 1151int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
1152 unsigned long possible_crtcs, 1152 unsigned long possible_crtcs,
1153 const struct drm_plane_funcs *funcs, 1153 const struct drm_plane_funcs *funcs,
1154 const uint32_t *formats, uint32_t format_count, 1154 const uint32_t *formats, unsigned int format_count,
1155 enum drm_plane_type type) 1155 enum drm_plane_type type)
1156{ 1156{
1157 struct drm_mode_config *config = &dev->mode_config; 1157 struct drm_mode_config *config = &dev->mode_config;
@@ -1225,7 +1225,7 @@ EXPORT_SYMBOL(drm_universal_plane_init);
1225int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, 1225int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
1226 unsigned long possible_crtcs, 1226 unsigned long possible_crtcs,
1227 const struct drm_plane_funcs *funcs, 1227 const struct drm_plane_funcs *funcs,
1228 const uint32_t *formats, uint32_t format_count, 1228 const uint32_t *formats, unsigned int format_count,
1229 bool is_primary) 1229 bool is_primary)
1230{ 1230{
1231 enum drm_plane_type type; 1231 enum drm_plane_type type;
@@ -5273,9 +5273,11 @@ void drm_mode_config_reset(struct drm_device *dev)
5273 if (encoder->funcs->reset) 5273 if (encoder->funcs->reset)
5274 encoder->funcs->reset(encoder); 5274 encoder->funcs->reset(encoder);
5275 5275
5276 mutex_lock(&dev->mode_config.mutex);
5276 drm_for_each_connector(connector, dev) 5277 drm_for_each_connector(connector, dev)
5277 if (connector->funcs->reset) 5278 if (connector->funcs->reset)
5278 connector->funcs->reset(connector); 5279 connector->funcs->reset(connector);
5280 mutex_unlock(&dev->mode_config.mutex);
5279} 5281}
5280EXPORT_SYMBOL(drm_mode_config_reset); 5282EXPORT_SYMBOL(drm_mode_config_reset);
5281 5283
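Walking the connector list now happens under mode_config.mutex, which is what drm_for_each_connector() expects to be held. Any driver-side iteration outside a modeset path should take the same lock; for instance, a sketch that counts registered connectors:

static int foo_count_connectors(struct drm_device *dev)
{
	struct drm_connector *connector;
	int n = 0;

	/* drm_for_each_connector() must run under mode_config.mutex. */
	mutex_lock(&dev->mode_config.mutex);
	drm_for_each_connector(connector, dev)
		n++;
	mutex_unlock(&dev->mode_config.mutex);

	return n;
}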
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index b0487c9f018c..e23df5fd3836 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -873,9 +873,10 @@ static void drm_dp_destroy_port(struct kref *kref)
873 from an EDID retrieval */ 873 from an EDID retrieval */
874 if (port->connector) { 874 if (port->connector) {
875 mutex_lock(&mgr->destroy_connector_lock); 875 mutex_lock(&mgr->destroy_connector_lock);
876 list_add(&port->connector->destroy_list, &mgr->destroy_connector_list); 876 list_add(&port->next, &mgr->destroy_connector_list);
877 mutex_unlock(&mgr->destroy_connector_lock); 877 mutex_unlock(&mgr->destroy_connector_lock);
878 schedule_work(&mgr->destroy_connector_work); 878 schedule_work(&mgr->destroy_connector_work);
879 return;
879 } 880 }
880 drm_dp_port_teardown_pdt(port, port->pdt); 881 drm_dp_port_teardown_pdt(port, port->pdt);
881 882
@@ -2631,6 +2632,16 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
2631 seq_printf(m, "%02x ", buf[i]); 2632 seq_printf(m, "%02x ", buf[i]);
2632 seq_printf(m, "\n"); 2633 seq_printf(m, "\n");
2633 2634
2635 /* dump the standard OUI branch header */
2636 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
2637 seq_printf(m, "branch oui: ");
2638 for (i = 0; i < 0x3; i++)
2639 seq_printf(m, "%02x", buf[i]);
2640 seq_printf(m, " devid: ");
2641 for (i = 0x3; i < 0x8; i++)
2642 seq_printf(m, "%c", buf[i]);
2643 seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
2644 seq_printf(m, "\n");
2634 bret = dump_dp_payload_table(mgr, buf); 2645 bret = dump_dp_payload_table(mgr, buf);
2635 if (bret == true) { 2646 if (bret == true) {
2636 seq_printf(m, "payload table: "); 2647 seq_printf(m, "payload table: ");
@@ -2659,7 +2670,7 @@ static void drm_dp_tx_work(struct work_struct *work)
2659static void drm_dp_destroy_connector_work(struct work_struct *work) 2670static void drm_dp_destroy_connector_work(struct work_struct *work)
2660{ 2671{
2661 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); 2672 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
2662 struct drm_connector *connector; 2673 struct drm_dp_mst_port *port;
2663 2674
2664 /* 2675 /*
2665 * Not a regular list traverse as we have to drop the destroy 2676 * Not a regular list traverse as we have to drop the destroy
@@ -2668,15 +2679,21 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
2668 */ 2679 */
2669 for (;;) { 2680 for (;;) {
2670 mutex_lock(&mgr->destroy_connector_lock); 2681 mutex_lock(&mgr->destroy_connector_lock);
2671 connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list); 2682 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
2672 if (!connector) { 2683 if (!port) {
2673 mutex_unlock(&mgr->destroy_connector_lock); 2684 mutex_unlock(&mgr->destroy_connector_lock);
2674 break; 2685 break;
2675 } 2686 }
2676 list_del(&connector->destroy_list); 2687 list_del(&port->next);
2677 mutex_unlock(&mgr->destroy_connector_lock); 2688 mutex_unlock(&mgr->destroy_connector_lock);
2678 2689
2679 mgr->cbs->destroy_connector(mgr, connector); 2690 mgr->cbs->destroy_connector(mgr, port->connector);
2691
2692 drm_dp_port_teardown_pdt(port, port->pdt);
2693
2694 if (!port->input && port->vcpi.vcpi > 0)
2695 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2696 kfree(port);
2680 } 2697 }
2681} 2698}
2682 2699
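The rework queues the drm_dp_mst_port itself (through its ->next list head) rather than the connector, so the worker can destroy the connector and then finish tearing down the port. The underlying deferred-destroy idiom, sketched with hypothetical foo_* types:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_item {
	struct list_head next;
	/* ... payload ... */
};

struct foo_mgr {
	struct mutex destroy_lock;
	struct list_head destroy_list;
	struct work_struct destroy_work;
};

static void foo_destroy_work_fn(struct work_struct *work)
{
	struct foo_mgr *mgr = container_of(work, struct foo_mgr, destroy_work);
	struct foo_item *item;

	for (;;) {
		mutex_lock(&mgr->destroy_lock);
		item = list_first_entry_or_null(&mgr->destroy_list,
						struct foo_item, next);
		if (!item) {
			mutex_unlock(&mgr->destroy_lock);
			break;
		}
		list_del(&item->next);
		/* Drop the lock before any teardown that might sleep. */
		mutex_unlock(&mgr->destroy_lock);

		kfree(item);
	}
}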
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index e6e05bb75a77..05bb7311ac5d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3802,7 +3802,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
3802 struct drm_display_mode *mode; 3802 struct drm_display_mode *mode;
3803 struct drm_device *dev = connector->dev; 3803 struct drm_device *dev = connector->dev;
3804 3804
3805 count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); 3805 count = ARRAY_SIZE(drm_dmt_modes);
3806 if (hdisplay < 0) 3806 if (hdisplay < 0)
3807 hdisplay = 0; 3807 hdisplay = 0;
3808 if (vdisplay < 0) 3808 if (vdisplay < 0)
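ARRAY_SIZE() from <linux/kernel.h> replaces the open-coded sizeof division; it is evaluated at compile time and stays correct if the element type of the array changes. A trivial illustration:

#include <linux/kernel.h>

static const int foo_table[] = { 10, 20, 30 };

/* Expands to sizeof(foo_table) / sizeof((foo_table)[0]) == 3. */
static const size_t foo_table_len = ARRAY_SIZE(foo_table);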
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index f01dc25df2dc..c19a62561183 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -222,9 +222,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
222 222
223static struct fb_ops drm_fbdev_cma_ops = { 223static struct fb_ops drm_fbdev_cma_ops = {
224 .owner = THIS_MODULE, 224 .owner = THIS_MODULE,
225 .fb_fillrect = sys_fillrect, 225 .fb_fillrect = drm_fb_helper_sys_fillrect,
226 .fb_copyarea = sys_copyarea, 226 .fb_copyarea = drm_fb_helper_sys_copyarea,
227 .fb_imageblit = sys_imageblit, 227 .fb_imageblit = drm_fb_helper_sys_imageblit,
228 .fb_check_var = drm_fb_helper_check_var, 228 .fb_check_var = drm_fb_helper_check_var,
229 .fb_set_par = drm_fb_helper_set_par, 229 .fb_set_par = drm_fb_helper_set_par,
230 .fb_blank = drm_fb_helper_blank, 230 .fb_blank = drm_fb_helper_blank,
@@ -263,10 +263,9 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
263 if (IS_ERR(obj)) 263 if (IS_ERR(obj))
264 return -ENOMEM; 264 return -ENOMEM;
265 265
266 fbi = framebuffer_alloc(0, dev->dev); 266 fbi = drm_fb_helper_alloc_fbi(helper);
267 if (!fbi) { 267 if (IS_ERR(fbi)) {
268 dev_err(dev->dev, "Failed to allocate framebuffer info.\n"); 268 ret = PTR_ERR(fbi);
269 ret = -ENOMEM;
270 goto err_drm_gem_cma_free_object; 269 goto err_drm_gem_cma_free_object;
271 } 270 }
272 271
@@ -274,23 +273,16 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
274 if (IS_ERR(fbdev_cma->fb)) { 273 if (IS_ERR(fbdev_cma->fb)) {
275 dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n"); 274 dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
276 ret = PTR_ERR(fbdev_cma->fb); 275 ret = PTR_ERR(fbdev_cma->fb);
277 goto err_framebuffer_release; 276 goto err_fb_info_destroy;
278 } 277 }
279 278
280 fb = &fbdev_cma->fb->fb; 279 fb = &fbdev_cma->fb->fb;
281 helper->fb = fb; 280 helper->fb = fb;
282 helper->fbdev = fbi;
283 281
284 fbi->par = helper; 282 fbi->par = helper;
285 fbi->flags = FBINFO_FLAG_DEFAULT; 283 fbi->flags = FBINFO_FLAG_DEFAULT;
286 fbi->fbops = &drm_fbdev_cma_ops; 284 fbi->fbops = &drm_fbdev_cma_ops;
287 285
288 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
289 if (ret) {
290 dev_err(dev->dev, "Failed to allocate color map.\n");
291 goto err_drm_fb_cma_destroy;
292 }
293
294 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); 286 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
295 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); 287 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
296 288
@@ -305,11 +297,8 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
305 297
306 return 0; 298 return 0;
307 299
308err_drm_fb_cma_destroy: 300err_fb_info_destroy:
309 drm_framebuffer_unregister_private(fb); 301 drm_fb_helper_release_fbi(helper);
310 drm_fb_cma_destroy(fb);
311err_framebuffer_release:
312 framebuffer_release(fbi);
313err_drm_gem_cma_free_object: 302err_drm_gem_cma_free_object:
314 drm_gem_cma_free_object(&obj->base); 303 drm_gem_cma_free_object(&obj->base);
315 return ret; 304 return ret;
@@ -385,20 +374,8 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
385 */ 374 */
386void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma) 375void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
387{ 376{
388 if (fbdev_cma->fb_helper.fbdev) { 377 drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
389 struct fb_info *info; 378 drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
390 int ret;
391
392 info = fbdev_cma->fb_helper.fbdev;
393 ret = unregister_framebuffer(info);
394 if (ret < 0)
395 DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
396
397 if (info->cmap.len)
398 fb_dealloc_cmap(&info->cmap);
399
400 framebuffer_release(info);
401 }
402 379
403 if (fbdev_cma->fb) { 380 if (fbdev_cma->fb) {
404 drm_framebuffer_unregister_private(&fbdev_cma->fb->fb); 381 drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 73f90f7e2f74..418d299f3b12 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -56,8 +56,8 @@ static LIST_HEAD(kernel_fb_helper_list);
56 * Teardown is done with drm_fb_helper_fini(). 56 * Teardown is done with drm_fb_helper_fini().
57 * 57 *
58 * At runtime drivers should restore the fbdev console by calling 58 * At runtime drivers should restore the fbdev console by calling
59 * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They 59 * drm_fb_helper_restore_fbdev_mode_unlocked() from their ->lastclose callback.
60 * should also notify the fb helper code from updates to the output 60 * They should also notify the fb helper code from updates to the output
61 * configuration by calling drm_fb_helper_hotplug_event(). For easier 61 * configuration by calling drm_fb_helper_hotplug_event(). For easier
62 * integration with the output polling code in drm_crtc_helper.c the modeset 62 * integration with the output polling code in drm_crtc_helper.c the modeset
63 * code provides a ->output_poll_changed callback. 63 * code provides a ->output_poll_changed callback.
@@ -168,11 +168,14 @@ static void remove_from_modeset(struct drm_mode_set *set,
168 } 168 }
169 set->num_connectors--; 169 set->num_connectors--;
170 170
171 /* because i915 is pissy about this.. 171 /*
172 * TODO maybe need to make sure we set it back to !=NULL somewhere? 172 * TODO maybe need to make sure we set it back to !=NULL somewhere?
173 */ 173 */
174 if (set->num_connectors == 0) 174 if (set->num_connectors == 0) {
175 set->fb = NULL; 175 set->fb = NULL;
176 drm_mode_destroy(connector->dev, set->mode);
177 set->mode = NULL;
178 }
176} 179}
177 180
178int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, 181int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
@@ -354,21 +357,6 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
354 } 357 }
355 return error; 358 return error;
356} 359}
357/**
358 * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
359 * @fb_helper: fbcon to restore
360 *
361 * This should be called from driver's drm ->lastclose callback
362 * when implementing an fbcon on top of kms using this helper. This ensures that
363 * the user isn't greeted with a black screen when e.g. X dies.
364 *
365 * Use this variant if you need to bypass locking (panic), or already
366 * hold all modeset locks. Otherwise use drm_fb_helper_restore_fbdev_mode_unlocked()
367 */
368static bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
369{
370 return restore_fbdev_mode(fb_helper);
371}
372 360
373/** 361/**
374 * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration 362 * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
@@ -398,42 +386,6 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
398} 386}
399EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); 387EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
400 388
401/*
402 * restore fbcon display for all kms drivers using this helper, used for sysrq
403 * and panic handling.
404 */
405static bool drm_fb_helper_force_kernel_mode(void)
406{
407 bool ret, error = false;
408 struct drm_fb_helper *helper;
409
410 if (list_empty(&kernel_fb_helper_list))
411 return false;
412
413 list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
414 struct drm_device *dev = helper->dev;
415
416 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
417 continue;
418
419 /*
420 * NOTE: Use trylock mode to avoid deadlocks and sleeping in
421 * panic context.
422 */
423 if (__drm_modeset_lock_all(dev, true) != 0) {
424 error = true;
425 continue;
426 }
427
428 ret = drm_fb_helper_restore_fbdev_mode(helper);
429 if (ret)
430 error = true;
431
432 drm_modeset_unlock_all(dev);
433 }
434 return error;
435}
436
437static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) 389static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
438{ 390{
439 struct drm_device *dev = fb_helper->dev; 391 struct drm_device *dev = fb_helper->dev;
@@ -459,6 +411,33 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
459} 411}
460 412
461#ifdef CONFIG_MAGIC_SYSRQ 413#ifdef CONFIG_MAGIC_SYSRQ
414/*
415 * restore fbcon display for all kms drivers using this helper, used for sysrq
416 * and panic handling.
417 */
418static bool drm_fb_helper_force_kernel_mode(void)
419{
420 bool ret, error = false;
421 struct drm_fb_helper *helper;
422
423 if (list_empty(&kernel_fb_helper_list))
424 return false;
425
426 list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
427 struct drm_device *dev = helper->dev;
428
429 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
430 continue;
431
432 drm_modeset_lock_all(dev);
433 ret = restore_fbdev_mode(helper);
434 if (ret)
435 error = true;
436 drm_modeset_unlock_all(dev);
437 }
438 return error;
439}
440
462static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) 441static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
463{ 442{
464 bool ret; 443 bool ret;
@@ -491,14 +470,6 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
491 int i, j; 470 int i, j;
492 471
493 /* 472 /*
494 * fbdev->blank can be called from irq context in case of a panic.
495 * Since we already have our own special panic handler which will
496 * restore the fbdev console mode completely, just bail out early.
497 */
498 if (oops_in_progress)
499 return;
500
501 /*
502 * For each CRTC in this fb, turn the connectors on/off. 473 * For each CRTC in this fb, turn the connectors on/off.
503 */ 474 */
504 drm_modeset_lock_all(dev); 475 drm_modeset_lock_all(dev);
@@ -531,6 +502,9 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
531 */ 502 */
532int drm_fb_helper_blank(int blank, struct fb_info *info) 503int drm_fb_helper_blank(int blank, struct fb_info *info)
533{ 504{
505 if (oops_in_progress)
506 return -EBUSY;
507
534 switch (blank) { 508 switch (blank) {
535 /* Display: On; HSync: On, VSync: On */ 509 /* Display: On; HSync: On, VSync: On */
536 case FB_BLANK_UNBLANK: 510 case FB_BLANK_UNBLANK:
@@ -654,6 +628,86 @@ out_free:
654} 628}
655EXPORT_SYMBOL(drm_fb_helper_init); 629EXPORT_SYMBOL(drm_fb_helper_init);
656 630
631/**
632 * drm_fb_helper_alloc_fbi - allocate fb_info and some of its members
633 * @fb_helper: driver-allocated fbdev helper
634 *
635 * A helper to alloc fb_info and the members cmap and apertures. Called
636 * by the driver within the fb_probe fb_helper callback function.
637 *
638 * RETURNS:
639 * fb_info pointer if things went okay, pointer containing error code
640 * otherwise
641 */
642struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
643{
644 struct device *dev = fb_helper->dev->dev;
645 struct fb_info *info;
646 int ret;
647
648 info = framebuffer_alloc(0, dev);
649 if (!info)
650 return ERR_PTR(-ENOMEM);
651
652 ret = fb_alloc_cmap(&info->cmap, 256, 0);
653 if (ret)
654 goto err_release;
655
656 info->apertures = alloc_apertures(1);
657 if (!info->apertures) {
658 ret = -ENOMEM;
659 goto err_free_cmap;
660 }
661
662 fb_helper->fbdev = info;
663
664 return info;
665
666err_free_cmap:
667 fb_dealloc_cmap(&info->cmap);
668err_release:
669 framebuffer_release(info);
670 return ERR_PTR(ret);
671}
672EXPORT_SYMBOL(drm_fb_helper_alloc_fbi);
673
674/**
675 * drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device
676 * @fb_helper: driver-allocated fbdev helper
677 *
678 * A wrapper around unregister_framebuffer, to release the fb_info
679 * framebuffer device
680 */
681void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
682{
683 if (fb_helper && fb_helper->fbdev)
684 unregister_framebuffer(fb_helper->fbdev);
685}
686EXPORT_SYMBOL(drm_fb_helper_unregister_fbi);
687
688/**
689 * drm_fb_helper_release_fbi - dealloc fb_info and its members
690 * @fb_helper: driver-allocated fbdev helper
691 *
692 * A helper to free memory taken by fb_info and the members cmap and
693 * apertures
694 */
695void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
696{
697 if (fb_helper) {
698 struct fb_info *info = fb_helper->fbdev;
699
700 if (info) {
701 if (info->cmap.len)
702 fb_dealloc_cmap(&info->cmap);
703 framebuffer_release(info);
704 }
705
706 fb_helper->fbdev = NULL;
707 }
708}
709EXPORT_SYMBOL(drm_fb_helper_release_fbi);
710
657void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) 711void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
658{ 712{
659 if (!list_empty(&fb_helper->kernel_fb_list)) { 713 if (!list_empty(&fb_helper->kernel_fb_list)) {
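Taken together, drm_fb_helper_alloc_fbi() and the unregister/release pair let a driver's fbdev code shed all direct framebuffer_alloc()/fb_alloc_cmap() handling. A sketch of how a driver's probe and fini paths would look on top of these helpers (foo_* names are hypothetical; foo_fb_ops is filled in by the fb_ops sketch further down):

static struct fb_ops foo_fb_ops;	/* hypothetical; see the sketch below */

static int foo_fbdev_probe(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct fb_info *info;

	info = drm_fb_helper_alloc_fbi(helper);	/* fb_info + cmap + apertures */
	if (IS_ERR(info))
		return PTR_ERR(info);

	/* ... create the backing drm_framebuffer and set helper->fb ... */

	info->par = helper;
	info->flags = FBINFO_FLAG_DEFAULT;
	info->fbops = &foo_fb_ops;
	drm_fb_helper_fill_fix(info, helper->fb->pitches[0], helper->fb->depth);
	drm_fb_helper_fill_var(info, helper, sizes->fb_width, sizes->fb_height);

	return 0;
}

static void foo_fbdev_fini(struct drm_fb_helper *helper)
{
	drm_fb_helper_unregister_fbi(helper);	/* unregister_framebuffer() */
	drm_fb_helper_release_fbi(helper);	/* frees cmap and fb_info */
	drm_fb_helper_fini(helper);
}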
@@ -668,6 +722,149 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
668} 722}
669EXPORT_SYMBOL(drm_fb_helper_fini); 723EXPORT_SYMBOL(drm_fb_helper_fini);
670 724
725/**
726 * drm_fb_helper_unlink_fbi - wrapper around unlink_framebuffer
727 * @fb_helper: driver-allocated fbdev helper
728 *
729 * A wrapper around unlink_framebuffer implemented by fbdev core
730 */
731void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
732{
733 if (fb_helper && fb_helper->fbdev)
734 unlink_framebuffer(fb_helper->fbdev);
735}
736EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
737
738/**
739 * drm_fb_helper_sys_read - wrapper around fb_sys_read
740 * @info: fb_info struct pointer
741 * @buf: userspace buffer to read from framebuffer memory
742 * @count: number of bytes to read from framebuffer memory
743 * @ppos: read offset within framebuffer memory
744 *
745 * A wrapper around fb_sys_read implemented by fbdev core
746 */
747ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
748 size_t count, loff_t *ppos)
749{
750 return fb_sys_read(info, buf, count, ppos);
751}
752EXPORT_SYMBOL(drm_fb_helper_sys_read);
753
754/**
755 * drm_fb_helper_sys_write - wrapper around fb_sys_write
756 * @info: fb_info struct pointer
757 * @buf: userspace buffer to write to framebuffer memory
758 * @count: number of bytes to write to framebuffer memory
759 * @ppos: write offset within framebuffer memory
760 *
761 * A wrapper around fb_sys_write implemented by fbdev core
762 */
763ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
764 size_t count, loff_t *ppos)
765{
766 return fb_sys_write(info, buf, count, ppos);
767}
768EXPORT_SYMBOL(drm_fb_helper_sys_write);
769
770/**
771 * drm_fb_helper_sys_fillrect - wrapper around sys_fillrect
772 * @info: fbdev registered by the helper
773 * @rect: info about rectangle to fill
774 *
775 * A wrapper around sys_fillrect implemented by fbdev core
776 */
777void drm_fb_helper_sys_fillrect(struct fb_info *info,
778 const struct fb_fillrect *rect)
779{
780 sys_fillrect(info, rect);
781}
782EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);
783
784/**
785 * drm_fb_helper_sys_copyarea - wrapper around sys_copyarea
786 * @info: fbdev registered by the helper
787 * @area: info about area to copy
788 *
789 * A wrapper around sys_copyarea implemented by fbdev core
790 */
791void drm_fb_helper_sys_copyarea(struct fb_info *info,
792 const struct fb_copyarea *area)
793{
794 sys_copyarea(info, area);
795}
796EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);
797
798/**
799 * drm_fb_helper_sys_imageblit - wrapper around sys_imageblit
800 * @info: fbdev registered by the helper
801 * @image: info about image to blit
802 *
803 * A wrapper around sys_imageblit implemented by fbdev core
804 */
805void drm_fb_helper_sys_imageblit(struct fb_info *info,
806 const struct fb_image *image)
807{
808 sys_imageblit(info, image);
809}
810EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
811
812/**
813 * drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect
814 * @info: fbdev registered by the helper
815 * @rect: info about rectangle to fill
816 *
817 * A wrapper around cfb_fillrect implemented by fbdev core
818 */
819void drm_fb_helper_cfb_fillrect(struct fb_info *info,
820 const struct fb_fillrect *rect)
821{
822 cfb_fillrect(info, rect);
823}
824EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
825
826/**
827 * drm_fb_helper_cfb_copyarea - wrapper around cfb_copyarea
828 * @info: fbdev registered by the helper
829 * @area: info about area to copy
830 *
831 * A wrapper around cfb_copyarea implemented by fbdev core
832 */
833void drm_fb_helper_cfb_copyarea(struct fb_info *info,
834 const struct fb_copyarea *area)
835{
836 cfb_copyarea(info, area);
837}
838EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
839
840/**
841 * drm_fb_helper_cfb_imageblit - wrapper around cfb_imageblit
842 * @info: fbdev registered by the helper
843 * @image: info about image to blit
844 *
845 * A wrapper around cfb_imageblit implemented by fbdev core
846 */
847void drm_fb_helper_cfb_imageblit(struct fb_info *info,
848 const struct fb_image *image)
849{
850 cfb_imageblit(info, image);
851}
852EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
853
854/**
855 * drm_fb_helper_set_suspend - wrapper around fb_set_suspend
856 * @fb_helper: driver-allocated fbdev helper
857 * @state: desired state, zero to resume, non-zero to suspend
858 *
859 * A wrapper around fb_set_suspend implemented by fbdev core
860 */
861void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state)
862{
863 if (fb_helper && fb_helper->fbdev)
864 fb_set_suspend(fb_helper->fbdev, state);
865}
866EXPORT_SYMBOL(drm_fb_helper_set_suspend);
867
671static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, 868static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
672 u16 blue, u16 regno, struct fb_info *info) 869 u16 blue, u16 regno, struct fb_info *info)
673{ 870{
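With the sys_*/cfb_* wrappers and drm_fb_helper_set_suspend() in place, a driver can populate its fb_ops entirely from helper functions and route suspend state through the helper as well. A sketch (foo_* is hypothetical; note fb_set_suspend(), and hence the wrapper, expects the console lock to be held):

#include <linux/console.h>
#include <drm/drm_fb_helper.h>

static struct fb_ops foo_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_read	= drm_fb_helper_sys_read,
	.fb_write	= drm_fb_helper_sys_write,
	.fb_fillrect	= drm_fb_helper_sys_fillrect,
	.fb_copyarea	= drm_fb_helper_sys_copyarea,
	.fb_imageblit	= drm_fb_helper_sys_imageblit,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,
	.fb_setcmap	= drm_fb_helper_setcmap,
};

static void foo_fbdev_set_suspend(struct drm_fb_helper *helper, int state)
{
	console_lock();
	drm_fb_helper_set_suspend(helper, state);	/* 0 resumes, 1 suspends */
	console_unlock();
}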
@@ -755,9 +952,10 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
755 int i, j, rc = 0; 952 int i, j, rc = 0;
756 int start; 953 int start;
757 954
758 if (__drm_modeset_lock_all(dev, !!oops_in_progress)) { 955 if (oops_in_progress)
759 return -EBUSY; 956 return -EBUSY;
760 } 957
958 drm_modeset_lock_all(dev);
761 if (!drm_fb_helper_is_bound(fb_helper)) { 959 if (!drm_fb_helper_is_bound(fb_helper)) {
762 drm_modeset_unlock_all(dev); 960 drm_modeset_unlock_all(dev);
763 return -EBUSY; 961 return -EBUSY;
@@ -906,6 +1104,9 @@ int drm_fb_helper_set_par(struct fb_info *info)
906 struct drm_fb_helper *fb_helper = info->par; 1104 struct drm_fb_helper *fb_helper = info->par;
907 struct fb_var_screeninfo *var = &info->var; 1105 struct fb_var_screeninfo *var = &info->var;
908 1106
1107 if (oops_in_progress)
1108 return -EBUSY;
1109
909 if (var->pixclock != 0) { 1110 if (var->pixclock != 0) {
910 DRM_ERROR("PIXEL CLOCK SET\n"); 1111 DRM_ERROR("PIXEL CLOCK SET\n");
911 return -EINVAL; 1112 return -EINVAL;
@@ -931,9 +1132,10 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
931 int ret = 0; 1132 int ret = 0;
932 int i; 1133 int i;
933 1134
934 if (__drm_modeset_lock_all(dev, !!oops_in_progress)) { 1135 if (oops_in_progress)
935 return -EBUSY; 1136 return -EBUSY;
936 } 1137
1138 drm_modeset_lock_all(dev);
937 if (!drm_fb_helper_is_bound(fb_helper)) { 1139 if (!drm_fb_helper_is_bound(fb_helper)) {
938 drm_modeset_unlock_all(dev); 1140 drm_modeset_unlock_all(dev);
939 return -EBUSY; 1141 return -EBUSY;
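The trylock dance via __drm_modeset_lock_all() is gone: fbdev entry points now simply refuse to run in oops context and otherwise take the modeset locks normally. The resulting guard pattern for any fbdev callback that needs modeset state, sketched:

static int foo_fb_callback(struct fb_info *info)
{
	struct drm_fb_helper *helper = info->par;
	struct drm_device *dev = helper->dev;

	/* Never contend for modeset locks from panic/oops context. */
	if (oops_in_progress)
		return -EBUSY;

	drm_modeset_lock_all(dev);
	/* ... inspect or update modeset state ... */
	drm_modeset_unlock_all(dev);

	return 0;
}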
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 27a4228b4343..3c2d4abd71c5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -766,7 +766,7 @@ drm_gem_object_free(struct kref *kref)
766 struct drm_gem_object *obj = (struct drm_gem_object *) kref; 766 struct drm_gem_object *obj = (struct drm_gem_object *) kref;
767 struct drm_device *dev = obj->dev; 767 struct drm_device *dev = obj->dev;
768 768
769 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 769 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
770 770
771 if (dev->driver->gem_free_object != NULL) 771 if (dev->driver->gem_free_object != NULL)
772 dev->driver->gem_free_object(obj); 772 dev->driver->gem_free_object(obj);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 9edad11dca98..86cc793cdf79 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -289,20 +289,15 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
289{ 289{
290 struct drm_gem_object *gem_obj; 290 struct drm_gem_object *gem_obj;
291 291
292 mutex_lock(&drm->struct_mutex);
293
294 gem_obj = drm_gem_object_lookup(drm, file_priv, handle); 292 gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
295 if (!gem_obj) { 293 if (!gem_obj) {
296 dev_err(drm->dev, "failed to lookup GEM object\n"); 294 dev_err(drm->dev, "failed to lookup GEM object\n");
297 mutex_unlock(&drm->struct_mutex);
298 return -EINVAL; 295 return -EINVAL;
299 } 296 }
300 297
301 *offset = drm_vma_node_offset_addr(&gem_obj->vma_node); 298 *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
302 299
303 drm_gem_object_unreference(gem_obj); 300 drm_gem_object_unreference_unlocked(gem_obj);
304
305 mutex_unlock(&drm->struct_mutex);
306 301
307 return 0; 302 return 0;
308} 303}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index b1d303fa2327..9a860ca1e9d7 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -480,7 +480,7 @@ static int drm_version(struct drm_device *dev, void *data,
480 * indicated permissions. If so, returns zero. Otherwise returns an 480 * indicated permissions. If so, returns zero. Otherwise returns an
481 * error code suitable for ioctl return. 481 * error code suitable for ioctl return.
482 */ 482 */
483static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) 483int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
484{ 484{
485 /* ROOT_ONLY is only for CAP_SYS_ADMIN */ 485 /* ROOT_ONLY is only for CAP_SYS_ADMIN */
486 if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))) 486 if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
@@ -508,6 +508,7 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
508 508
509 return 0; 509 return 0;
510} 510}
511EXPORT_SYMBOL(drm_ioctl_permit);
511 512
512#define DRM_IOCTL_DEF(ioctl, _func, _flags) \ 513#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
513 [DRM_IOCTL_NR(ioctl)] = { \ 514 [DRM_IOCTL_NR(ioctl)] = { \
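Exporting drm_ioctl_permit() lets drivers that dispatch private ioctls outside the core table reuse the same permission checks. A hedged sketch of such a handler (foo_custom_ioctl is illustrative; the flags carry the usual DRM_AUTH/DRM_MASTER/DRM_ROOT_ONLY semantics):

static int foo_custom_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	ret = drm_ioctl_permit(DRM_AUTH | DRM_MASTER, file_priv);
	if (ret)
		return ret;

	/* ... driver-private work ... */
	return 0;
}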
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index ee14324522ce..22d207e211e7 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -43,8 +43,8 @@
43#include <linux/export.h> 43#include <linux/export.h>
44 44
45/* Access macro for slots in vblank timestamp ringbuffer. */ 45/* Access macro for slots in vblank timestamp ringbuffer. */
46#define vblanktimestamp(dev, crtc, count) \ 46#define vblanktimestamp(dev, pipe, count) \
47 ((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE]) 47 ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
48 48
49/* Retry timestamp calculation up to 3 times to satisfy 49/* Retry timestamp calculation up to 3 times to satisfy
50 * drm_timestamp_precision before giving up. 50 * drm_timestamp_precision before giving up.
@@ -57,7 +57,7 @@
57#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000 57#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
58 58
59static bool 59static bool
60drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, 60drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
61 struct timeval *tvblank, unsigned flags); 61 struct timeval *tvblank, unsigned flags);
62 62
63static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ 63static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
@@ -107,7 +107,7 @@ static void store_vblank(struct drm_device *dev, int crtc,
107/** 107/**
108 * drm_update_vblank_count - update the master vblank counter 108 * drm_update_vblank_count - update the master vblank counter
109 * @dev: DRM device 109 * @dev: DRM device
110 * @crtc: counter to update 110 * @pipe: counter to update
111 * 111 *
112 * Call back into the driver to update the appropriate vblank counter 112 * Call back into the driver to update the appropriate vblank counter
113 * (specified by @crtc). Deal with wraparound, if it occurred, and 113 * (specified by @crtc). Deal with wraparound, if it occurred, and
@@ -120,9 +120,9 @@ static void store_vblank(struct drm_device *dev, int crtc,
120 * Note: caller must hold dev->vbl_lock since this reads & writes 120 * Note: caller must hold dev->vbl_lock since this reads & writes
121 * device vblank fields. 121 * device vblank fields.
122 */ 122 */
123static void drm_update_vblank_count(struct drm_device *dev, int crtc) 123static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe)
124{ 124{
125 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 125 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
126 u32 cur_vblank, diff; 126 u32 cur_vblank, diff;
127 bool rc; 127 bool rc;
128 struct timeval t_vblank; 128 struct timeval t_vblank;
@@ -140,21 +140,21 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
140 * corresponding vblank timestamp. 140 * corresponding vblank timestamp.
141 */ 141 */
142 do { 142 do {
143 cur_vblank = dev->driver->get_vblank_counter(dev, crtc); 143 cur_vblank = dev->driver->get_vblank_counter(dev, pipe);
144 rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0); 144 rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, 0);
145 } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc)); 145 } while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe));
146 146
147 /* Deal with counter wrap */ 147 /* Deal with counter wrap */
148 diff = cur_vblank - vblank->last; 148 diff = cur_vblank - vblank->last;
149 if (cur_vblank < vblank->last) { 149 if (cur_vblank < vblank->last) {
150 diff += dev->max_vblank_count + 1; 150 diff += dev->max_vblank_count + 1;
151 151
152 DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", 152 DRM_DEBUG("last_vblank[%u]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
153 crtc, vblank->last, cur_vblank, diff); 153 pipe, vblank->last, cur_vblank, diff);
154 } 154 }
155 155
156 DRM_DEBUG("updating vblank count on crtc %d, missed %d\n", 156 DRM_DEBUG("updating vblank count on crtc %u, missed %d\n",
157 crtc, diff); 157 pipe, diff);
158 158
159 if (diff == 0) 159 if (diff == 0)
160 return; 160 return;
@@ -167,7 +167,7 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
167 if (!rc) 167 if (!rc)
168 t_vblank = (struct timeval) {0, 0}; 168 t_vblank = (struct timeval) {0, 0};
169 169
170 store_vblank(dev, crtc, diff, &t_vblank); 170 store_vblank(dev, pipe, diff, &t_vblank);
171} 171}
172 172
173/* 173/*
@@ -176,9 +176,9 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
176 * are preserved, even if there are any spurious vblank irq's after 176 * are preserved, even if there are any spurious vblank irq's after
177 * disable. 177 * disable.
178 */ 178 */
179static void vblank_disable_and_save(struct drm_device *dev, int crtc) 179static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
180{ 180{
181 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 181 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
182 unsigned long irqflags; 182 unsigned long irqflags;
183 u32 vblcount; 183 u32 vblcount;
184 s64 diff_ns; 184 s64 diff_ns;
@@ -206,8 +206,8 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
206 * vblank interrupt is disabled. 206 * vblank interrupt is disabled.
207 */ 207 */
208 if (!vblank->enabled && 208 if (!vblank->enabled &&
209 drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0)) { 209 drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0)) {
210 drm_update_vblank_count(dev, crtc); 210 drm_update_vblank_count(dev, pipe);
211 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 211 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
212 return; 212 return;
213 } 213 }
@@ -218,7 +218,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
218 * hardware potentially runtime suspended. 218 * hardware potentially runtime suspended.
219 */ 219 */
220 if (vblank->enabled) { 220 if (vblank->enabled) {
221 dev->driver->disable_vblank(dev, crtc); 221 dev->driver->disable_vblank(dev, pipe);
222 vblank->enabled = false; 222 vblank->enabled = false;
223 } 223 }
224 224
@@ -235,9 +235,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
235 * delayed gpu counter increment. 235 * delayed gpu counter increment.
236 */ 236 */
237 do { 237 do {
238 vblank->last = dev->driver->get_vblank_counter(dev, crtc); 238 vblank->last = dev->driver->get_vblank_counter(dev, pipe);
239 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); 239 vblrc = drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0);
240 } while (vblank->last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc); 240 } while (vblank->last != dev->driver->get_vblank_counter(dev, pipe) && (--count) && vblrc);
241 241
242 if (!count) 242 if (!count)
243 vblrc = 0; 243 vblrc = 0;
@@ -247,7 +247,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
247 */ 247 */
248 vblcount = vblank->count; 248 vblcount = vblank->count;
249 diff_ns = timeval_to_ns(&tvblank) - 249 diff_ns = timeval_to_ns(&tvblank) -
250 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); 250 timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
251 251
252 /* If there is at least 1 msec difference between the last stored 252 /* If there is at least 1 msec difference between the last stored
253 * timestamp and tvblank, then we are currently executing our 253 * timestamp and tvblank, then we are currently executing our
@@ -262,7 +262,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
262 * hope for the best. 262 * hope for the best.
263 */ 263 */
264 if (vblrc && (abs64(diff_ns) > 1000000)) 264 if (vblrc && (abs64(diff_ns) > 1000000))
265 store_vblank(dev, crtc, 1, &tvblank); 265 store_vblank(dev, pipe, 1, &tvblank);
266 266
267 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 267 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
268} 268}
@@ -271,16 +271,16 @@ static void vblank_disable_fn(unsigned long arg)
271{ 271{
272 struct drm_vblank_crtc *vblank = (void *)arg; 272 struct drm_vblank_crtc *vblank = (void *)arg;
273 struct drm_device *dev = vblank->dev; 273 struct drm_device *dev = vblank->dev;
274 unsigned int pipe = vblank->pipe;
274 unsigned long irqflags; 275 unsigned long irqflags;
275 int crtc = vblank->crtc;
276 276
277 if (!dev->vblank_disable_allowed) 277 if (!dev->vblank_disable_allowed)
278 return; 278 return;
279 279
280 spin_lock_irqsave(&dev->vbl_lock, irqflags); 280 spin_lock_irqsave(&dev->vbl_lock, irqflags);
281 if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) { 281 if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
282 DRM_DEBUG("disabling vblank on crtc %d\n", crtc); 282 DRM_DEBUG("disabling vblank on crtc %u\n", pipe);
283 vblank_disable_and_save(dev, crtc); 283 vblank_disable_and_save(dev, pipe);
284 } 284 }
285 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 285 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
286} 286}
@@ -293,14 +293,14 @@ static void vblank_disable_fn(unsigned long arg)
293 */ 293 */
294void drm_vblank_cleanup(struct drm_device *dev) 294void drm_vblank_cleanup(struct drm_device *dev)
295{ 295{
296 int crtc; 296 unsigned int pipe;
297 297
298 /* Bail if the driver didn't call drm_vblank_init() */ 298 /* Bail if the driver didn't call drm_vblank_init() */
299 if (dev->num_crtcs == 0) 299 if (dev->num_crtcs == 0)
300 return; 300 return;
301 301
302 for (crtc = 0; crtc < dev->num_crtcs; crtc++) { 302 for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
303 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 303 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
304 304
305 WARN_ON(vblank->enabled && 305 WARN_ON(vblank->enabled &&
306 drm_core_check_feature(dev, DRIVER_MODESET)); 306 drm_core_check_feature(dev, DRIVER_MODESET));
@@ -316,17 +316,18 @@ EXPORT_SYMBOL(drm_vblank_cleanup);
316 316
317/** 317/**
318 * drm_vblank_init - initialize vblank support 318 * drm_vblank_init - initialize vblank support
319 * @dev: drm_device 319 * @dev: DRM device
320 * @num_crtcs: number of crtcs supported by @dev 320 * @num_crtcs: number of CRTCs supported by @dev
321 * 321 *
322 * This function initializes vblank support for @num_crtcs display pipelines. 322 * This function initializes vblank support for @num_crtcs display pipelines.
323 * 323 *
324 * Returns: 324 * Returns:
325 * Zero on success or a negative error code on failure. 325 * Zero on success or a negative error code on failure.
326 */ 326 */
327int drm_vblank_init(struct drm_device *dev, int num_crtcs) 327int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
328{ 328{
329 int i, ret = -ENOMEM; 329 int ret = -ENOMEM;
330 unsigned int i;
330 331
331 spin_lock_init(&dev->vbl_lock); 332 spin_lock_init(&dev->vbl_lock);
332 spin_lock_init(&dev->vblank_time_lock); 333 spin_lock_init(&dev->vblank_time_lock);
@@ -341,7 +342,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
341 struct drm_vblank_crtc *vblank = &dev->vblank[i]; 342 struct drm_vblank_crtc *vblank = &dev->vblank[i];
342 343
343 vblank->dev = dev; 344 vblank->dev = dev;
344 vblank->crtc = i; 345 vblank->pipe = i;
345 init_waitqueue_head(&vblank->queue); 346 init_waitqueue_head(&vblank->queue);
346 setup_timer(&vblank->disable_timer, vblank_disable_fn, 347 setup_timer(&vblank->disable_timer, vblank_disable_fn,
347 (unsigned long)vblank); 348 (unsigned long)vblank);
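drm_vblank_init() is called once at driver load, after the number of display pipelines is known; failure should abort the load. Sketch:

static int foo_driver_load(struct drm_device *dev)
{
	int ret;

	/* One vblank counter per CRTC exposed by the driver. */
	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret) {
		DRM_ERROR("failed to initialize vblank support: %d\n", ret);
		return ret;
	}

	return 0;
}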
@@ -624,17 +625,17 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
624 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 625 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
625 framedur_ns /= 2; 626 framedur_ns /= 2;
626 } else 627 } else
627 DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n", 628 DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n",
628 crtc->base.id); 629 crtc->base.id);
629 630
630 crtc->pixeldur_ns = pixeldur_ns; 631 crtc->pixeldur_ns = pixeldur_ns;
631 crtc->linedur_ns = linedur_ns; 632 crtc->linedur_ns = linedur_ns;
632 crtc->framedur_ns = framedur_ns; 633 crtc->framedur_ns = framedur_ns;
633 634
634 DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n", 635 DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
635 crtc->base.id, mode->crtc_htotal, 636 crtc->base.id, mode->crtc_htotal,
636 mode->crtc_vtotal, mode->crtc_vdisplay); 637 mode->crtc_vtotal, mode->crtc_vdisplay);
637 DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n", 638 DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
638 crtc->base.id, dotclock, framedur_ns, 639 crtc->base.id, dotclock, framedur_ns,
639 linedur_ns, pixeldur_ns); 640 linedur_ns, pixeldur_ns);
640} 641}
@@ -643,7 +644,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
643/** 644/**
644 * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper 645 * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
645 * @dev: DRM device 646 * @dev: DRM device
646 * @crtc: Which CRTC's vblank timestamp to retrieve 647 * @pipe: index of CRTC whose vblank timestamp to retrieve
647 * @max_error: Desired maximum allowable error in timestamps (nanosecs) 648 * @max_error: Desired maximum allowable error in timestamps (nanosecs)
648 * On return contains true maximum error of timestamp 649 * On return contains true maximum error of timestamp
649 * @vblank_time: Pointer to struct timeval which should receive the timestamp 650 * @vblank_time: Pointer to struct timeval which should receive the timestamp
@@ -686,7 +687,8 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
686 * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval. 687 * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
687 * 688 *
688 */ 689 */
689int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, 690int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
691 unsigned int pipe,
690 int *max_error, 692 int *max_error,
691 struct timeval *vblank_time, 693 struct timeval *vblank_time,
692 unsigned flags, 694 unsigned flags,
@@ -700,8 +702,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
700 int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns; 702 int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
701 bool invbl; 703 bool invbl;
702 704
703 if (crtc < 0 || crtc >= dev->num_crtcs) { 705 if (pipe >= dev->num_crtcs) {
704 DRM_ERROR("Invalid crtc %d\n", crtc); 706 DRM_ERROR("Invalid crtc %u\n", pipe);
705 return -EINVAL; 707 return -EINVAL;
706 } 708 }
707 709
@@ -720,7 +722,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
720 * Happens during initial modesetting of a crtc. 722 * Happens during initial modesetting of a crtc.
721 */ 723 */
722 if (framedur_ns == 0) { 724 if (framedur_ns == 0) {
723 DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc); 725 DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
724 return -EAGAIN; 726 return -EAGAIN;
725 } 727 }
726 728
@@ -736,13 +738,13 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
736 * Get vertical and horizontal scanout position vpos, hpos, 738 * Get vertical and horizontal scanout position vpos, hpos,
737 * and bounding timestamps stime, etime, pre/post query. 739 * and bounding timestamps stime, etime, pre/post query.
738 */ 740 */
739 vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos, 741 vbl_status = dev->driver->get_scanout_position(dev, pipe, flags, &vpos,
740 &hpos, &stime, &etime); 742 &hpos, &stime, &etime);
741 743
742 /* Return as no-op if scanout query unsupported or failed. */ 744 /* Return as no-op if scanout query unsupported or failed. */
743 if (!(vbl_status & DRM_SCANOUTPOS_VALID)) { 745 if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
744 DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n", 746 DRM_DEBUG("crtc %u : scanoutpos query failed [%d].\n",
745 crtc, vbl_status); 747 pipe, vbl_status);
746 return -EIO; 748 return -EIO;
747 } 749 }
748 750
@@ -756,8 +758,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
756 758
757 /* Noisy system timing? */ 759 /* Noisy system timing? */
758 if (i == DRM_TIMESTAMP_MAXRETRIES) { 760 if (i == DRM_TIMESTAMP_MAXRETRIES) {
759 DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n", 761 DRM_DEBUG("crtc %u: Noisy timestamp %d us > %d us [%d reps].\n",
760 crtc, duration_ns/1000, *max_error/1000, i); 762 pipe, duration_ns/1000, *max_error/1000, i);
761 } 763 }
762 764
763 /* Return upper bound of timestamp precision error. */ 765 /* Return upper bound of timestamp precision error. */
@@ -790,8 +792,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
790 etime = ktime_sub_ns(etime, delta_ns); 792 etime = ktime_sub_ns(etime, delta_ns);
791 *vblank_time = ktime_to_timeval(etime); 793 *vblank_time = ktime_to_timeval(etime);
792 794
793 DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", 795 DRM_DEBUG("crtc %u : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
794 crtc, (int)vbl_status, hpos, vpos, 796 pipe, (int)vbl_status, hpos, vpos,
795 (long)tv_etime.tv_sec, (long)tv_etime.tv_usec, 797 (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
796 (long)vblank_time->tv_sec, (long)vblank_time->tv_usec, 798 (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
797 duration_ns/1000, i); 799 duration_ns/1000, i);
@@ -816,7 +818,7 @@ static struct timeval get_drm_timestamp(void)
816 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent 818 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
817 * vblank interval 819 * vblank interval
818 * @dev: DRM device 820 * @dev: DRM device
819 * @crtc: which CRTC's vblank timestamp to retrieve 821 * @pipe: index of CRTC whose vblank timestamp to retrieve
820 * @tvblank: Pointer to target struct timeval which should receive the timestamp 822 * @tvblank: Pointer to target struct timeval which should receive the timestamp
821 * @flags: Flags to pass to driver: 823 * @flags: Flags to pass to driver:
822 * 0 = Default, 824 * 0 = Default,
@@ -833,7 +835,7 @@ static struct timeval get_drm_timestamp(void)
833 * True if timestamp is considered to be very precise, false otherwise. 835 * True if timestamp is considered to be very precise, false otherwise.
834 */ 836 */
835static bool 837static bool
836drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, 838drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
837 struct timeval *tvblank, unsigned flags) 839 struct timeval *tvblank, unsigned flags)
838{ 840{
839 int ret; 841 int ret;
@@ -843,7 +845,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
843 845
844 /* Query driver if possible and precision timestamping enabled. */ 846 /* Query driver if possible and precision timestamping enabled. */
845 if (dev->driver->get_vblank_timestamp && (max_error > 0)) { 847 if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
846 ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error, 848 ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error,
847 tvblank, flags); 849 tvblank, flags);
848 if (ret > 0) 850 if (ret > 0)
849 return true; 851 return true;
@@ -860,7 +862,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
860/** 862/**
861 * drm_vblank_count - retrieve "cooked" vblank counter value 863 * drm_vblank_count - retrieve "cooked" vblank counter value
862 * @dev: DRM device 864 * @dev: DRM device
863 * @crtc: which counter to retrieve 865 * @pipe: index of CRTC for which to retrieve the counter
864 * 866 *
865 * Fetches the "cooked" vblank count value that represents the number of 867 * Fetches the "cooked" vblank count value that represents the number of
866 * vblank events since the system was booted, including lost events due to 868 * vblank events since the system was booted, including lost events due to
@@ -871,12 +873,13 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
871 * Returns: 873 * Returns:
872 * The software vblank counter. 874 * The software vblank counter.
873 */ 875 */
874u32 drm_vblank_count(struct drm_device *dev, int crtc) 876u32 drm_vblank_count(struct drm_device *dev, int pipe)
875{ 877{
876 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 878 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
877 879
878 if (WARN_ON(crtc >= dev->num_crtcs)) 880 if (WARN_ON(pipe >= dev->num_crtcs))
879 return 0; 881 return 0;
882
880 return vblank->count; 883 return vblank->count;
881} 884}
882EXPORT_SYMBOL(drm_vblank_count); 885EXPORT_SYMBOL(drm_vblank_count);
@@ -901,11 +904,10 @@ u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
901EXPORT_SYMBOL(drm_crtc_vblank_count); 904EXPORT_SYMBOL(drm_crtc_vblank_count);
902 905
903/** 906/**
904 * drm_vblank_count_and_time - retrieve "cooked" vblank counter value 907 * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the
905 * and the system timestamp corresponding to that vblank counter value. 908 * system timestamp corresponding to that vblank counter value.
906 *
907 * @dev: DRM device 909 * @dev: DRM device
908 * @crtc: which counter to retrieve 910 * @pipe: index of CRTC whose counter to retrieve
909 * @vblanktime: Pointer to struct timeval to receive the vblank timestamp. 911 * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
910 * 912 *
911 * Fetches the "cooked" vblank count value that represents the number of 913 * Fetches the "cooked" vblank count value that represents the number of
@@ -913,13 +915,13 @@ EXPORT_SYMBOL(drm_crtc_vblank_count);
913 * modesetting activity. Returns corresponding system timestamp of the time 915 * modesetting activity. Returns corresponding system timestamp of the time
914 * of the vblank interval that corresponds to the current vblank counter value. 916 * of the vblank interval that corresponds to the current vblank counter value.
915 */ 917 */
916u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, 918u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
917 struct timeval *vblanktime) 919 struct timeval *vblanktime)
918{ 920{
919 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 921 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
920 u32 cur_vblank; 922 u32 cur_vblank;
921 923
922 if (WARN_ON(crtc >= dev->num_crtcs)) 924 if (WARN_ON(pipe >= dev->num_crtcs))
923 return 0; 925 return 0;
924 926
925 /* 927 /*
@@ -930,7 +932,7 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
930 do { 932 do {
931 cur_vblank = vblank->count; 933 cur_vblank = vblank->count;
932 smp_rmb(); 934 smp_rmb();
933 *vblanktime = vblanktimestamp(dev, crtc, cur_vblank); 935 *vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
934 smp_rmb(); 936 smp_rmb();
935 } while (cur_vblank != vblank->count); 937 } while (cur_vblank != vblank->count);
936 938
@@ -957,7 +959,7 @@ static void send_vblank_event(struct drm_device *dev,
957/** 959/**
958 * drm_send_vblank_event - helper to send vblank event after pageflip 960 * drm_send_vblank_event - helper to send vblank event after pageflip
959 * @dev: DRM device 961 * @dev: DRM device
960 * @crtc: CRTC in question 962 * @pipe: CRTC index
961 * @e: the event to send 963 * @e: the event to send
962 * 964 *
963 * Updates sequence # and timestamp on event, and sends it to userspace. 965 * Updates sequence # and timestamp on event, and sends it to userspace.
@@ -965,20 +967,20 @@ static void send_vblank_event(struct drm_device *dev,
965 * 967 *
966 * This is the legacy version of drm_crtc_send_vblank_event(). 968 * This is the legacy version of drm_crtc_send_vblank_event().
967 */ 969 */
968void drm_send_vblank_event(struct drm_device *dev, int crtc, 970void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
969 struct drm_pending_vblank_event *e) 971 struct drm_pending_vblank_event *e)
970{ 972{
971 struct timeval now; 973 struct timeval now;
972 unsigned int seq; 974 unsigned int seq;
973 975
974 if (crtc >= 0) { 976 if (dev->num_crtcs > 0) {
975 seq = drm_vblank_count_and_time(dev, crtc, &now); 977 seq = drm_vblank_count_and_time(dev, pipe, &now);
976 } else { 978 } else {
977 seq = 0; 979 seq = 0;
978 980
979 now = get_drm_timestamp(); 981 now = get_drm_timestamp();
980 } 982 }
981 e->pipe = crtc; 983 e->pipe = pipe;
982 send_vblank_event(dev, e, seq, &now); 984 send_vblank_event(dev, e, seq, &now);
983} 985}
984EXPORT_SYMBOL(drm_send_vblank_event); 986EXPORT_SYMBOL(drm_send_vblank_event);
@@ -1003,11 +1005,14 @@ EXPORT_SYMBOL(drm_crtc_send_vblank_event);
1003/** 1005/**
1004 * drm_vblank_enable - enable the vblank interrupt on a CRTC 1006 * drm_vblank_enable - enable the vblank interrupt on a CRTC
1005 * @dev: DRM device 1007 * @dev: DRM device
1006 * @crtc: CRTC in question 1008 * @pipe: CRTC index
1009 *
1010 * Returns:
1011 * Zero on success or a negative error code on failure.
1007 */ 1012 */
1008static int drm_vblank_enable(struct drm_device *dev, int crtc) 1013static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
1009{ 1014{
1010 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1015 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1011 int ret = 0; 1016 int ret = 0;
1012 1017
1013 assert_spin_locked(&dev->vbl_lock); 1018 assert_spin_locked(&dev->vbl_lock);
@@ -1022,13 +1027,13 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
1022 * timestamps. Filter code in drm_handle_vblank() will 1027 * timestamps. Filter code in drm_handle_vblank() will
1023 * prevent double-accounting of same vblank interval. 1028 * prevent double-accounting of same vblank interval.
1024 */ 1029 */
1025 ret = dev->driver->enable_vblank(dev, crtc); 1030 ret = dev->driver->enable_vblank(dev, pipe);
1026 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); 1031 DRM_DEBUG("enabling vblank on crtc %u, ret: %d\n", pipe, ret);
1027 if (ret) 1032 if (ret)
1028 atomic_dec(&vblank->refcount); 1033 atomic_dec(&vblank->refcount);
1029 else { 1034 else {
1030 vblank->enabled = true; 1035 vblank->enabled = true;
1031 drm_update_vblank_count(dev, crtc); 1036 drm_update_vblank_count(dev, pipe);
1032 } 1037 }
1033 } 1038 }
1034 1039
@@ -1040,7 +1045,7 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
1040/** 1045/**
1041 * drm_vblank_get - get a reference count on vblank events 1046 * drm_vblank_get - get a reference count on vblank events
1042 * @dev: DRM device 1047 * @dev: DRM device
1043 * @crtc: which CRTC to own 1048 * @pipe: index of CRTC to own
1044 * 1049 *
1045 * Acquire a reference count on vblank events to avoid having them disabled 1050 * Acquire a reference count on vblank events to avoid having them disabled
1046 * while in use. 1051 * while in use.
@@ -1048,24 +1053,24 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
1048 * This is the legacy version of drm_crtc_vblank_get(). 1053 * This is the legacy version of drm_crtc_vblank_get().
1049 * 1054 *
1050 * Returns: 1055 * Returns:
1051 * Zero on success, nonzero on failure. 1056 * Zero on success or a negative error code on failure.
1052 */ 1057 */
1053int drm_vblank_get(struct drm_device *dev, int crtc) 1058int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
1054{ 1059{
1055 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1060 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1056 unsigned long irqflags; 1061 unsigned long irqflags;
1057 int ret = 0; 1062 int ret = 0;
1058 1063
1059 if (!dev->num_crtcs) 1064 if (!dev->num_crtcs)
1060 return -EINVAL; 1065 return -EINVAL;
1061 1066
1062 if (WARN_ON(crtc >= dev->num_crtcs)) 1067 if (WARN_ON(pipe >= dev->num_crtcs))
1063 return -EINVAL; 1068 return -EINVAL;
1064 1069
1065 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1070 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1066 /* Going from 0->1 means we have to enable interrupts again */ 1071 /* Going from 0->1 means we have to enable interrupts again */
1067 if (atomic_add_return(1, &vblank->refcount) == 1) { 1072 if (atomic_add_return(1, &vblank->refcount) == 1) {
1068 ret = drm_vblank_enable(dev, crtc); 1073 ret = drm_vblank_enable(dev, pipe);
1069 } else { 1074 } else {
1070 if (!vblank->enabled) { 1075 if (!vblank->enabled) {
1071 atomic_dec(&vblank->refcount); 1076 atomic_dec(&vblank->refcount);
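The refcount semantics above make drm_vblank_get()/drm_vblank_put() a simple bracket around any code that needs the counter ticking; a sketch:

        if (drm_vblank_get(dev, pipe) == 0) {
                u32 seq = drm_vblank_count(dev, pipe);

                /* ... sample or wait on seq with the interrupt held on ... */

                drm_vblank_put(dev, pipe);
        }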
@@ -1088,7 +1093,7 @@ EXPORT_SYMBOL(drm_vblank_get);
1088 * This is the native kms version of drm_vblank_get(). 1093 * This is the native kms version of drm_vblank_get().
1089 * 1094 *
1090 * Returns: 1095 * Returns:
1091 * Zero on success, nonzero on failure. 1096 * Zero on success or a negative error code on failure.
1092 */ 1097 */
1093int drm_crtc_vblank_get(struct drm_crtc *crtc) 1098int drm_crtc_vblank_get(struct drm_crtc *crtc)
1094{ 1099{
@@ -1097,23 +1102,23 @@ int drm_crtc_vblank_get(struct drm_crtc *crtc)
1097EXPORT_SYMBOL(drm_crtc_vblank_get); 1102EXPORT_SYMBOL(drm_crtc_vblank_get);
1098 1103
1099/** 1104/**
1100 * drm_vblank_put - give up ownership of vblank events 1105 * drm_vblank_put - release ownership of vblank events
1101 * @dev: DRM device 1106 * @dev: DRM device
1102 * @crtc: which counter to give up 1107 * @pipe: index of CRTC to release
1103 * 1108 *
1104 * Release ownership of a given vblank counter, turning off interrupts 1109 * Release ownership of a given vblank counter, turning off interrupts
1105 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. 1110 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
1106 * 1111 *
1107 * This is the legacy version of drm_crtc_vblank_put(). 1112 * This is the legacy version of drm_crtc_vblank_put().
1108 */ 1113 */
1109void drm_vblank_put(struct drm_device *dev, int crtc) 1114void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
1110{ 1115{
1111 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1116 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1112 1117
1113 if (WARN_ON(atomic_read(&vblank->refcount) == 0)) 1118 if (WARN_ON(pipe >= dev->num_crtcs))
1114 return; 1119 return;
1115 1120
1116 if (WARN_ON(crtc >= dev->num_crtcs)) 1121 if (WARN_ON(atomic_read(&vblank->refcount) == 0))
1117 return; 1122 return;
1118 1123
1119 /* Last user schedules interrupt disable */ 1124 /* Last user schedules interrupt disable */
@@ -1147,30 +1152,34 @@ EXPORT_SYMBOL(drm_crtc_vblank_put);
1147/** 1152/**
1148 * drm_wait_one_vblank - wait for one vblank 1153 * drm_wait_one_vblank - wait for one vblank
1149 * @dev: DRM device 1154 * @dev: DRM device
1150 * @crtc: crtc index 1155 * @pipe: CRTC index
1151 * 1156 *
1152 * This waits for one vblank to pass on @crtc, using the irq driver interfaces. 1157 * This waits for one vblank to pass on @pipe, using the irq driver interfaces.
1153 * It is a failure to call this when the vblank irq for @crtc is disabled, e.g. 1158 * It is a failure to call this when the vblank irq for @pipe is disabled, e.g.
1154 * due to lack of driver support or because the crtc is off. 1159 * due to lack of driver support or because the crtc is off.
1155 */ 1160 */
1156void drm_wait_one_vblank(struct drm_device *dev, int crtc) 1161void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
1157{ 1162{
1163 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1158 int ret; 1164 int ret;
1159 u32 last; 1165 u32 last;
1160 1166
1161 ret = drm_vblank_get(dev, crtc); 1167 if (WARN_ON(pipe >= dev->num_crtcs))
1162 if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", crtc, ret))
1163 return; 1168 return;
1164 1169
1165 last = drm_vblank_count(dev, crtc); 1170 ret = drm_vblank_get(dev, pipe);
1171 if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", pipe, ret))
1172 return;
1166 1173
1167 ret = wait_event_timeout(dev->vblank[crtc].queue, 1174 last = drm_vblank_count(dev, pipe);
1168 last != drm_vblank_count(dev, crtc), 1175
1176 ret = wait_event_timeout(vblank->queue,
1177 last != drm_vblank_count(dev, pipe),
1169 msecs_to_jiffies(100)); 1178 msecs_to_jiffies(100));
1170 1179
1171 WARN(ret == 0, "vblank wait timed out on crtc %i\n", crtc); 1180 WARN(ret == 0, "vblank wait timed out on crtc %i\n", pipe);
1172 1181
1173 drm_vblank_put(dev, crtc); 1182 drm_vblank_put(dev, pipe);
1174} 1183}
1175EXPORT_SYMBOL(drm_wait_one_vblank); 1184EXPORT_SYMBOL(drm_wait_one_vblank);
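Given the 100 ms timeout and the WARNs above, this helper is meant for paths where the pipe is known to be running; a sketch of letting scanout settle for two frames, where crtc is assumed to be an enabled struct drm_crtc *:

        int i;

        for (i = 0; i < 2; i++)
                drm_crtc_wait_one_vblank(crtc);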
1176 1185
@@ -1191,7 +1200,7 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
1191/** 1200/**
1192 * drm_vblank_off - disable vblank events on a CRTC 1201 * drm_vblank_off - disable vblank events on a CRTC
1193 * @dev: DRM device 1202 * @dev: DRM device
1194 * @crtc: CRTC in question 1203 * @pipe: CRTC index
1195 * 1204 *
1196 * Drivers can use this function to shut down the vblank interrupt handling when 1205 * Drivers can use this function to shut down the vblank interrupt handling when
1197 * disabling a crtc. This function ensures that the latest vblank frame count is 1206 * disabling a crtc. This function ensures that the latest vblank frame count is
@@ -1202,21 +1211,21 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
1202 * 1211 *
1203 * This is the legacy version of drm_crtc_vblank_off(). 1212 * This is the legacy version of drm_crtc_vblank_off().
1204 */ 1213 */
1205void drm_vblank_off(struct drm_device *dev, int crtc) 1214void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
1206{ 1215{
1207 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1216 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1208 struct drm_pending_vblank_event *e, *t; 1217 struct drm_pending_vblank_event *e, *t;
1209 struct timeval now; 1218 struct timeval now;
1210 unsigned long irqflags; 1219 unsigned long irqflags;
1211 unsigned int seq; 1220 unsigned int seq;
1212 1221
1213 if (WARN_ON(crtc >= dev->num_crtcs)) 1222 if (WARN_ON(pipe >= dev->num_crtcs))
1214 return; 1223 return;
1215 1224
1216 spin_lock_irqsave(&dev->event_lock, irqflags); 1225 spin_lock_irqsave(&dev->event_lock, irqflags);
1217 1226
1218 spin_lock(&dev->vbl_lock); 1227 spin_lock(&dev->vbl_lock);
1219 vblank_disable_and_save(dev, crtc); 1228 vblank_disable_and_save(dev, pipe);
1220 wake_up(&vblank->queue); 1229 wake_up(&vblank->queue);
1221 1230
1222 /* 1231 /*
@@ -1230,16 +1239,16 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
1230 spin_unlock(&dev->vbl_lock); 1239 spin_unlock(&dev->vbl_lock);
1231 1240
1232 /* Send any queued vblank events, lest the natives grow disquiet */ 1241 /* Send any queued vblank events, lest the natives grow disquiet */
1233 seq = drm_vblank_count_and_time(dev, crtc, &now); 1242 seq = drm_vblank_count_and_time(dev, pipe, &now);
1234 1243
1235 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { 1244 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
1236 if (e->pipe != crtc) 1245 if (e->pipe != pipe)
1237 continue; 1246 continue;
1238 DRM_DEBUG("Sending premature vblank event on disable: \ 1247 DRM_DEBUG("Sending premature vblank event on disable: \
1239 wanted %d, current %d\n", 1248 wanted %d, current %d\n",
1240 e->event.sequence, seq); 1249 e->event.sequence, seq);
1241 list_del(&e->base.link); 1250 list_del(&e->base.link);
1242 drm_vblank_put(dev, e->pipe); 1251 drm_vblank_put(dev, pipe);
1243 send_vblank_event(dev, e, seq, &now); 1252 send_vblank_event(dev, e, seq, &now);
1244 } 1253 }
1245 spin_unlock_irqrestore(&dev->event_lock, irqflags); 1254 spin_unlock_irqrestore(&dev->event_lock, irqflags);
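Note the small cleanup above: the reference is now dropped for @pipe rather than e->pipe; the two are equal inside the filtered loop, so behavior is unchanged. In a KMS driver this is typically the first step of the CRTC disable hook, so pending waiters are unblocked before scanout stops; a sketch with hypothetical foo_* names:

static void foo_crtc_disable(struct drm_crtc *crtc)
{
        drm_crtc_vblank_off(crtc);      /* complete queued events first */
        foo_hw_stop_scanout(crtc);      /* hypothetical hardware shutdown */
}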
@@ -1300,7 +1309,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_reset);
1300/** 1309/**
1301 * drm_vblank_on - enable vblank events on a CRTC 1310 * drm_vblank_on - enable vblank events on a CRTC
1302 * @dev: DRM device 1311 * @dev: DRM device
1303 * @crtc: CRTC in question 1312 * @pipe: CRTC index
1304 * 1313 *
1305 * This function restores the vblank interrupt state captured with 1314 * This function restores the vblank interrupt state captured with
1306 * drm_vblank_off() again. Note that calls to drm_vblank_on() and 1315 * drm_vblank_off() again. Note that calls to drm_vblank_on() and
@@ -1309,12 +1318,12 @@ EXPORT_SYMBOL(drm_crtc_vblank_reset);
1309 * 1318 *
1310 * This is the legacy version of drm_crtc_vblank_on(). 1319 * This is the legacy version of drm_crtc_vblank_on().
1311 */ 1320 */
1312void drm_vblank_on(struct drm_device *dev, int crtc) 1321void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
1313{ 1322{
1314 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1323 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1315 unsigned long irqflags; 1324 unsigned long irqflags;
1316 1325
1317 if (WARN_ON(crtc >= dev->num_crtcs)) 1326 if (WARN_ON(pipe >= dev->num_crtcs))
1318 return; 1327 return;
1319 1328
1320 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1329 spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -1332,7 +1341,7 @@ void drm_vblank_on(struct drm_device *dev, int crtc)
1332 * vblank counter value before and after a modeset 1341 * vblank counter value before and after a modeset
1333 */ 1342 */
1334 vblank->last = 1343 vblank->last =
1335 (dev->driver->get_vblank_counter(dev, crtc) - 1) & 1344 (dev->driver->get_vblank_counter(dev, pipe) - 1) &
1336 dev->max_vblank_count; 1345 dev->max_vblank_count;
1337 /* 1346 /*
1338 * re-enable interrupts if there are users left, or the 1347 * re-enable interrupts if there are users left, or the
@@ -1340,7 +1349,7 @@ void drm_vblank_on(struct drm_device *dev, int crtc)
1340 */ 1349 */
1341 if (atomic_read(&vblank->refcount) != 0 || 1350 if (atomic_read(&vblank->refcount) != 0 ||
1342 (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0)) 1351 (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
1343 WARN_ON(drm_vblank_enable(dev, crtc)); 1352 WARN_ON(drm_vblank_enable(dev, pipe));
1344 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1353 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1345} 1354}
1346EXPORT_SYMBOL(drm_vblank_on); 1355EXPORT_SYMBOL(drm_vblank_on);
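The counterpart in the enable hook restores vblank handling only after scanout is running again, keeping the on/off calls balanced as the kerneldoc requires; a sketch with hypothetical foo_* names:

static void foo_crtc_enable(struct drm_crtc *crtc)
{
        foo_hw_start_scanout(crtc);     /* hypothetical hardware bring-up */
        drm_crtc_vblank_on(crtc);       /* must balance drm_crtc_vblank_off() */
}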
@@ -1365,7 +1374,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_on);
1365/** 1374/**
1366 * drm_vblank_pre_modeset - account for vblanks across mode sets 1375 * drm_vblank_pre_modeset - account for vblanks across mode sets
1367 * @dev: DRM device 1376 * @dev: DRM device
1368 * @crtc: CRTC in question 1377 * @pipe: CRTC index
1369 * 1378 *
1370 * Account for vblank events across mode setting events, which will likely 1379 * Account for vblank events across mode setting events, which will likely
1371 * reset the hardware frame counter. 1380 * reset the hardware frame counter.
@@ -1385,15 +1394,15 @@ EXPORT_SYMBOL(drm_crtc_vblank_on);
1385 * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc 1394 * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
1386 * again. 1395 * again.
1387 */ 1396 */
1388void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) 1397void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe)
1389{ 1398{
1390 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1399 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1391 1400
1392 /* vblank is not initialized (IRQ not installed?), or has been freed */ 1401 /* vblank is not initialized (IRQ not installed?), or has been freed */
1393 if (!dev->num_crtcs) 1402 if (!dev->num_crtcs)
1394 return; 1403 return;
1395 1404
1396 if (WARN_ON(crtc >= dev->num_crtcs)) 1405 if (WARN_ON(pipe >= dev->num_crtcs))
1397 return; 1406 return;
1398 1407
1399 /* 1408 /*
@@ -1405,7 +1414,7 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
1405 */ 1414 */
1406 if (!vblank->inmodeset) { 1415 if (!vblank->inmodeset) {
1407 vblank->inmodeset = 0x1; 1416 vblank->inmodeset = 0x1;
1408 if (drm_vblank_get(dev, crtc) == 0) 1417 if (drm_vblank_get(dev, pipe) == 0)
1409 vblank->inmodeset |= 0x2; 1418 vblank->inmodeset |= 0x2;
1410 } 1419 }
1411} 1420}
@@ -1414,27 +1423,30 @@ EXPORT_SYMBOL(drm_vblank_pre_modeset);
1414/** 1423/**
1415 * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes 1424 * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
1416 * @dev: DRM device 1425 * @dev: DRM device
1417 * @crtc: CRTC in question 1426 * @pipe: CRTC index
1418 * 1427 *
1419 * This function drops the temporary vblank reference acquired in 1428 * This function drops the temporary vblank reference acquired in
1420 * drm_vblank_pre_modeset(). 1429 * drm_vblank_pre_modeset().
1421 */ 1430 */
1422void drm_vblank_post_modeset(struct drm_device *dev, int crtc) 1431void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
1423{ 1432{
1424 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1433 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1425 unsigned long irqflags; 1434 unsigned long irqflags;
1426 1435
1427 /* vblank is not initialized (IRQ not installed?), or has been freed */ 1436 /* vblank is not initialized (IRQ not installed?), or has been freed */
1428 if (!dev->num_crtcs) 1437 if (!dev->num_crtcs)
1429 return; 1438 return;
1430 1439
1440 if (WARN_ON(pipe >= dev->num_crtcs))
1441 return;
1442
1431 if (vblank->inmodeset) { 1443 if (vblank->inmodeset) {
1432 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1444 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1433 dev->vblank_disable_allowed = true; 1445 dev->vblank_disable_allowed = true;
1434 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1446 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1435 1447
1436 if (vblank->inmodeset & 0x2) 1448 if (vblank->inmodeset & 0x2)
1437 drm_vblank_put(dev, crtc); 1449 drm_vblank_put(dev, pipe);
1438 1450
1439 vblank->inmodeset = 0; 1451 vblank->inmodeset = 0;
1440 } 1452 }
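For the legacy (non-KMS) drivers that still use this pair, the bracket looks like the following sketch, with foo_program_mode() standing in for the driver's actual modeset:

        drm_vblank_pre_modeset(dev, pipe);      /* grab a ref, mark the gap */
        foo_program_mode(dev, pipe, mode);      /* hypothetical: counter may reset */
        drm_vblank_post_modeset(dev, pipe);     /* drop the ref, resume counting */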
@@ -1456,7 +1468,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
1456 struct drm_file *file_priv) 1468 struct drm_file *file_priv)
1457{ 1469{
1458 struct drm_modeset_ctl *modeset = data; 1470 struct drm_modeset_ctl *modeset = data;
1459 unsigned int crtc; 1471 unsigned int pipe;
1460 1472
1461 /* If drm_vblank_init() hasn't been called yet, just no-op */ 1473 /* If drm_vblank_init() hasn't been called yet, just no-op */
1462 if (!dev->num_crtcs) 1474 if (!dev->num_crtcs)
@@ -1466,16 +1478,16 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
1466 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1478 if (drm_core_check_feature(dev, DRIVER_MODESET))
1467 return 0; 1479 return 0;
1468 1480
1469 crtc = modeset->crtc; 1481 pipe = modeset->crtc;
1470 if (crtc >= dev->num_crtcs) 1482 if (pipe >= dev->num_crtcs)
1471 return -EINVAL; 1483 return -EINVAL;
1472 1484
1473 switch (modeset->cmd) { 1485 switch (modeset->cmd) {
1474 case _DRM_PRE_MODESET: 1486 case _DRM_PRE_MODESET:
1475 drm_vblank_pre_modeset(dev, crtc); 1487 drm_vblank_pre_modeset(dev, pipe);
1476 break; 1488 break;
1477 case _DRM_POST_MODESET: 1489 case _DRM_POST_MODESET:
1478 drm_vblank_post_modeset(dev, crtc); 1490 drm_vblank_post_modeset(dev, pipe);
1479 break; 1491 break;
1480 default: 1492 default:
1481 return -EINVAL; 1493 return -EINVAL;
@@ -1484,7 +1496,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
1484 return 0; 1496 return 0;
1485} 1497}
1486 1498
1487static int drm_queue_vblank_event(struct drm_device *dev, int pipe, 1499static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
1488 union drm_wait_vblank *vblwait, 1500 union drm_wait_vblank *vblwait,
1489 struct drm_file *file_priv) 1501 struct drm_file *file_priv)
1490{ 1502{
@@ -1538,7 +1550,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
1538 vblwait->reply.sequence = vblwait->request.sequence; 1550 vblwait->reply.sequence = vblwait->request.sequence;
1539 } 1551 }
1540 1552
1541 DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n", 1553 DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
1542 vblwait->request.sequence, seq, pipe); 1554 vblwait->request.sequence, seq, pipe);
1543 1555
1544 trace_drm_vblank_event_queued(current->pid, pipe, 1556 trace_drm_vblank_event_queued(current->pid, pipe,
@@ -1587,7 +1599,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1587 struct drm_vblank_crtc *vblank; 1599 struct drm_vblank_crtc *vblank;
1588 union drm_wait_vblank *vblwait = data; 1600 union drm_wait_vblank *vblwait = data;
1589 int ret; 1601 int ret;
1590 unsigned int flags, seq, crtc, high_crtc; 1602 unsigned int flags, seq, pipe, high_pipe;
1591 1603
1592 if (!dev->irq_enabled) 1604 if (!dev->irq_enabled)
1593 return -EINVAL; 1605 return -EINVAL;
@@ -1606,22 +1618,22 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1606 } 1618 }
1607 1619
1608 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; 1620 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
1609 high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK); 1621 high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
1610 if (high_crtc) 1622 if (high_pipe)
1611 crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT; 1623 pipe = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
1612 else 1624 else
1613 crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; 1625 pipe = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
1614 if (crtc >= dev->num_crtcs) 1626 if (pipe >= dev->num_crtcs)
1615 return -EINVAL; 1627 return -EINVAL;
1616 1628
1617 vblank = &dev->vblank[crtc]; 1629 vblank = &dev->vblank[pipe];
1618 1630
1619 ret = drm_vblank_get(dev, crtc); 1631 ret = drm_vblank_get(dev, pipe);
1620 if (ret) { 1632 if (ret) {
1621 DRM_DEBUG("failed to acquire vblank counter, %d\n", ret); 1633 DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
1622 return ret; 1634 return ret;
1623 } 1635 }
1624 seq = drm_vblank_count(dev, crtc); 1636 seq = drm_vblank_count(dev, pipe);
1625 1637
1626 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { 1638 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
1627 case _DRM_VBLANK_RELATIVE: 1639 case _DRM_VBLANK_RELATIVE:
@@ -1638,7 +1650,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1638 /* must hold on to the vblank ref until the event fires 1650 /* must hold on to the vblank ref until the event fires
1639 * drm_vblank_put will be called asynchronously 1651 * drm_vblank_put will be called asynchronously
1640 */ 1652 */
1641 return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); 1653 return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
1642 } 1654 }
1643 1655
1644 if ((flags & _DRM_VBLANK_NEXTONMISS) && 1656 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
@@ -1646,11 +1658,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1646 vblwait->request.sequence = seq + 1; 1658 vblwait->request.sequence = seq + 1;
1647 } 1659 }
1648 1660
1649 DRM_DEBUG("waiting on vblank count %d, crtc %d\n", 1661 DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
1650 vblwait->request.sequence, crtc); 1662 vblwait->request.sequence, pipe);
1651 vblank->last_wait = vblwait->request.sequence; 1663 vblank->last_wait = vblwait->request.sequence;
1652 DRM_WAIT_ON(ret, vblank->queue, 3 * HZ, 1664 DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
1653 (((drm_vblank_count(dev, crtc) - 1665 (((drm_vblank_count(dev, pipe) -
1654 vblwait->request.sequence) <= (1 << 23)) || 1666 vblwait->request.sequence) <= (1 << 23)) ||
1655 !vblank->enabled || 1667 !vblank->enabled ||
1656 !dev->irq_enabled)); 1668 !dev->irq_enabled));
@@ -1658,7 +1670,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1658 if (ret != -EINTR) { 1670 if (ret != -EINTR) {
1659 struct timeval now; 1671 struct timeval now;
1660 1672
1661 vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now); 1673 vblwait->reply.sequence = drm_vblank_count_and_time(dev, pipe, &now);
1662 vblwait->reply.tval_sec = now.tv_sec; 1674 vblwait->reply.tval_sec = now.tv_sec;
1663 vblwait->reply.tval_usec = now.tv_usec; 1675 vblwait->reply.tval_usec = now.tv_usec;
1664 1676
@@ -1669,11 +1681,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1669 } 1681 }
1670 1682
1671done: 1683done:
1672 drm_vblank_put(dev, crtc); 1684 drm_vblank_put(dev, pipe);
1673 return ret; 1685 return ret;
1674} 1686}
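From userspace this ioctl is normally reached through libdrm; a sketch of a relative wait on the first pipe, assuming an open DRM fd (pipes above 1 would additionally encode the index via _DRM_VBLANK_HIGH_CRTC_SHIFT):

        drmVBlank vbl;

        memset(&vbl, 0, sizeof(vbl));
        vbl.request.type = DRM_VBLANK_RELATIVE;
        vbl.request.sequence = 1;               /* one vblank from now */
        if (drmWaitVBlank(fd, &vbl) == 0)
                printf("vblank %u at %ld.%06ld\n", vbl.reply.sequence,
                       vbl.reply.tval_sec, vbl.reply.tval_usec);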
1675 1687
1676static void drm_handle_vblank_events(struct drm_device *dev, int crtc) 1688static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
1677{ 1689{
1678 struct drm_pending_vblank_event *e, *t; 1690 struct drm_pending_vblank_event *e, *t;
1679 struct timeval now; 1691 struct timeval now;
@@ -1681,10 +1693,10 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
1681 1693
1682 assert_spin_locked(&dev->event_lock); 1694 assert_spin_locked(&dev->event_lock);
1683 1695
1684 seq = drm_vblank_count_and_time(dev, crtc, &now); 1696 seq = drm_vblank_count_and_time(dev, pipe, &now);
1685 1697
1686 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { 1698 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
1687 if (e->pipe != crtc) 1699 if (e->pipe != pipe)
1688 continue; 1700 continue;
1689 if ((seq - e->event.sequence) > (1<<23)) 1701 if ((seq - e->event.sequence) > (1<<23))
1690 continue; 1702 continue;
@@ -1693,26 +1705,26 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
1693 e->event.sequence, seq); 1705 e->event.sequence, seq);
1694 1706
1695 list_del(&e->base.link); 1707 list_del(&e->base.link);
1696 drm_vblank_put(dev, e->pipe); 1708 drm_vblank_put(dev, pipe);
1697 send_vblank_event(dev, e, seq, &now); 1709 send_vblank_event(dev, e, seq, &now);
1698 } 1710 }
1699 1711
1700 trace_drm_vblank_event(crtc, seq); 1712 trace_drm_vblank_event(pipe, seq);
1701} 1713}
1702 1714
1703/** 1715/**
1704 * drm_handle_vblank - handle a vblank event 1716 * drm_handle_vblank - handle a vblank event
1705 * @dev: DRM device 1717 * @dev: DRM device
1706 * @crtc: where this event occurred 1718 * @pipe: index of CRTC where this event occurred
1707 * 1719 *
1708 * Drivers should call this routine in their vblank interrupt handlers to 1720 * Drivers should call this routine in their vblank interrupt handlers to
1709 * update the vblank counter and send any signals that may be pending. 1721 * update the vblank counter and send any signals that may be pending.
1710 * 1722 *
1711 * This is the legacy version of drm_crtc_handle_vblank(). 1723 * This is the legacy version of drm_crtc_handle_vblank().
1712 */ 1724 */
1713bool drm_handle_vblank(struct drm_device *dev, int crtc) 1725bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
1714{ 1726{
1715 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1727 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1716 u32 vblcount; 1728 u32 vblcount;
1717 s64 diff_ns; 1729 s64 diff_ns;
1718 struct timeval tvblank; 1730 struct timeval tvblank;
@@ -1721,7 +1733,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1721 if (WARN_ON_ONCE(!dev->num_crtcs)) 1733 if (WARN_ON_ONCE(!dev->num_crtcs))
1722 return false; 1734 return false;
1723 1735
1724 if (WARN_ON(crtc >= dev->num_crtcs)) 1736 if (WARN_ON(pipe >= dev->num_crtcs))
1725 return false; 1737 return false;
1726 1738
1727 spin_lock_irqsave(&dev->event_lock, irqflags); 1739 spin_lock_irqsave(&dev->event_lock, irqflags);
@@ -1745,11 +1757,11 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1745 1757
1746 /* Get current timestamp and count. */ 1758 /* Get current timestamp and count. */
1747 vblcount = vblank->count; 1759 vblcount = vblank->count;
1748 drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ); 1760 drm_get_last_vbltimestamp(dev, pipe, &tvblank, DRM_CALLED_FROM_VBLIRQ);
1749 1761
1750 /* Compute time difference to timestamp of last vblank */ 1762 /* Compute time difference to timestamp of last vblank */
1751 diff_ns = timeval_to_ns(&tvblank) - 1763 diff_ns = timeval_to_ns(&tvblank) -
1752 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); 1764 timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
1753 1765
1754 /* Update vblank timestamp and count if at least 1766 /* Update vblank timestamp and count if at least
1755 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds 1767 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
@@ -1761,15 +1773,15 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1761 * ignore those for accounting. 1773 * ignore those for accounting.
1762 */ 1774 */
1763 if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) 1775 if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS)
1764 store_vblank(dev, crtc, 1, &tvblank); 1776 store_vblank(dev, pipe, 1, &tvblank);
1765 else 1777 else
1766 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", 1778 DRM_DEBUG("crtc %u: Redundant vblirq ignored. diff_ns = %d\n",
1767 crtc, (int) diff_ns); 1779 pipe, (int) diff_ns);
1768 1780
1769 spin_unlock(&dev->vblank_time_lock); 1781 spin_unlock(&dev->vblank_time_lock);
1770 1782
1771 wake_up(&vblank->queue); 1783 wake_up(&vblank->queue);
1772 drm_handle_vblank_events(dev, crtc); 1784 drm_handle_vblank_events(dev, pipe);
1773 1785
1774 spin_unlock_irqrestore(&dev->event_lock, irqflags); 1786 spin_unlock_irqrestore(&dev->event_lock, irqflags);
1775 1787
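A driver's interrupt handler is the only intended caller; a sketch with hypothetical foo_* register helpers:

static irqreturn_t foo_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        u32 status = foo_read_and_clear_irq(dev);       /* hypothetical */

        if (status & FOO_IRQ_VBLANK_PIPE0)              /* hypothetical bit */
                drm_handle_vblank(dev, 0);

        return IRQ_HANDLED;
}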
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 744dfbc6a329..fba321ca4344 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -55,41 +55,27 @@
55 * drm_modeset_acquire_fini(&ctx); 55 * drm_modeset_acquire_fini(&ctx);
56 */ 56 */
57 57
58
59/** 58/**
60 * __drm_modeset_lock_all - internal helper to grab all modeset locks 59 * drm_modeset_lock_all - take all modeset locks
61 * @dev: DRM device 60 * @dev: drm device
62 * @trylock: trylock mode for atomic contexts
63 *
64 * This is a special version of drm_modeset_lock_all() which can also be used in
65 * atomic contexts. Then @trylock must be set to true.
66 * 61 *
67 * Returns: 62 * This function takes all modeset locks, suitable where a more fine-grained
68 * 0 on success or negative error code on failure. 63 * scheme isn't (yet) implemented. Locks must be dropped with
64 * drm_modeset_unlock_all.
69 */ 65 */
70int __drm_modeset_lock_all(struct drm_device *dev, 66void drm_modeset_lock_all(struct drm_device *dev)
71 bool trylock)
72{ 67{
73 struct drm_mode_config *config = &dev->mode_config; 68 struct drm_mode_config *config = &dev->mode_config;
74 struct drm_modeset_acquire_ctx *ctx; 69 struct drm_modeset_acquire_ctx *ctx;
75 int ret; 70 int ret;
76 71
77 ctx = kzalloc(sizeof(*ctx), 72 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
78 trylock ? GFP_ATOMIC : GFP_KERNEL); 73 if (WARN_ON(!ctx))
79 if (!ctx) 74 return;
80 return -ENOMEM;
81 75
82 if (trylock) { 76 mutex_lock(&config->mutex);
83 if (!mutex_trylock(&config->mutex)) {
84 ret = -EBUSY;
85 goto out;
86 }
87 } else {
88 mutex_lock(&config->mutex);
89 }
90 77
91 drm_modeset_acquire_init(ctx, 0); 78 drm_modeset_acquire_init(ctx, 0);
92 ctx->trylock_only = trylock;
93 79
94retry: 80retry:
95 ret = drm_modeset_lock(&config->connection_mutex, ctx); 81 ret = drm_modeset_lock(&config->connection_mutex, ctx);
@@ -108,7 +94,7 @@ retry:
108 94
109 drm_warn_on_modeset_not_all_locked(dev); 95 drm_warn_on_modeset_not_all_locked(dev);
110 96
111 return 0; 97 return;
112 98
113fail: 99fail:
114 if (ret == -EDEADLK) { 100 if (ret == -EDEADLK) {
@@ -116,23 +102,7 @@ fail:
116 goto retry; 102 goto retry;
117 } 103 }
118 104
119out:
120 kfree(ctx); 105 kfree(ctx);
121 return ret;
122}
123EXPORT_SYMBOL(__drm_modeset_lock_all);
124
125/**
126 * drm_modeset_lock_all - take all modeset locks
127 * @dev: drm device
128 *
129 * This function takes all modeset locks, suitable where a more fine-grained
130 * scheme isn't (yet) implemented. Locks must be dropped with
131 * drm_modeset_unlock_all.
132 */
133void drm_modeset_lock_all(struct drm_device *dev)
134{
135 WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
136} 106}
137EXPORT_SYMBOL(drm_modeset_lock_all); 107EXPORT_SYMBOL(drm_modeset_lock_all);
138 108
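Drivers that previously leaned on the removed trylock variant are expected to use an acquire context directly; the retry loop mirrors the one kept above. A sketch taking a single CRTC lock:

        struct drm_modeset_acquire_ctx ctx;
        int ret;

        drm_modeset_acquire_init(&ctx, 0);
retry:
        ret = drm_modeset_lock(&crtc->mutex, &ctx);
        if (ret == -EDEADLK) {
                drm_modeset_backoff(&ctx);
                goto retry;
        }
        /* ... touch CRTC state under the lock ... */
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);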
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 43003c4ad80b..df0b61a60501 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -56,7 +56,7 @@ config DRM_EXYNOS_DSI
56 56
57config DRM_EXYNOS_DP 57config DRM_EXYNOS_DP
58 bool "EXYNOS DRM DP driver support" 58 bool "EXYNOS DRM DP driver support"
59 depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) 59 depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON)
60 default DRM_EXYNOS 60 default DRM_EXYNOS
61 select DRM_PANEL 61 select DRM_PANEL
62 help 62 help
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 7de0b1084fcd..02aecfed6354 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -3,10 +3,9 @@
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4 4
5ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos 5ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
6exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \ 6exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
7 exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \ 7 exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
8 exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \ 8 exynos_drm_plane.o
9 exynos_drm_plane.o exynos_drm_dmabuf.o
10 9
11exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o 10exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
12exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o 11exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 8b1225f245fc..484e312e0a22 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -152,15 +152,15 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
152#define OFFSIZE(x) (((x) & 0x3fff) << 14) 152#define OFFSIZE(x) (((x) & 0x3fff) << 14)
153#define PAGEWIDTH(x) ((x) & 0x3fff) 153#define PAGEWIDTH(x) ((x) & 0x3fff)
154 154
155static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win) 155static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
156 struct drm_framebuffer *fb)
156{ 157{
157 struct exynos_drm_plane *plane = &ctx->planes[win];
158 unsigned long val; 158 unsigned long val;
159 159
160 val = readl(ctx->addr + DECON_WINCONx(win)); 160 val = readl(ctx->addr + DECON_WINCONx(win));
161 val &= ~WINCONx_BPPMODE_MASK; 161 val &= ~WINCONx_BPPMODE_MASK;
162 162
163 switch (plane->pixel_format) { 163 switch (fb->pixel_format) {
164 case DRM_FORMAT_XRGB1555: 164 case DRM_FORMAT_XRGB1555:
165 val |= WINCONx_BPPMODE_16BPP_I1555; 165 val |= WINCONx_BPPMODE_16BPP_I1555;
166 val |= WINCONx_HAWSWP_F; 166 val |= WINCONx_HAWSWP_F;
@@ -186,7 +186,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
186 return; 186 return;
187 } 187 }
188 188
189 DRM_DEBUG_KMS("bpp = %u\n", plane->bpp); 189 DRM_DEBUG_KMS("bpp = %u\n", fb->bits_per_pixel);
190 190
191 /* 191 /*
192 * In case of exynos, setting dma-burst to 16Word causes permanent 192 * In case of exynos, setting dma-burst to 16Word causes permanent
@@ -196,7 +196,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
196 * movement causes unstable DMA which results in iommu crash/tear. 196 * movement causes unstable DMA which results in iommu crash/tear.
197 */ 197 */
198 198
199 if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) { 199 if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
200 val &= ~WINCONx_BURSTLEN_MASK; 200 val &= ~WINCONx_BURSTLEN_MASK;
201 val |= WINCONx_BURSTLEN_8WORD; 201 val |= WINCONx_BURSTLEN_8WORD;
202 } 202 }
@@ -219,17 +219,16 @@ static void decon_shadow_protect_win(struct decon_context *ctx, int win,
219 writel(val, ctx->addr + DECON_SHADOWCON); 219 writel(val, ctx->addr + DECON_SHADOWCON);
220} 220}
221 221
222static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) 222static void decon_update_plane(struct exynos_drm_crtc *crtc,
223 struct exynos_drm_plane *plane)
223{ 224{
224 struct decon_context *ctx = crtc->ctx; 225 struct decon_context *ctx = crtc->ctx;
225 struct exynos_drm_plane *plane; 226 struct drm_plane_state *state = plane->base.state;
227 unsigned int win = plane->zpos;
228 unsigned int bpp = state->fb->bits_per_pixel >> 3;
229 unsigned int pitch = state->fb->pitches[0];
226 u32 val; 230 u32 val;
227 231
228 if (win < 0 || win >= WINDOWS_NR)
229 return;
230
231 plane = &ctx->planes[win];
232
233 if (ctx->suspended) 232 if (ctx->suspended)
234 return; 233 return;
235 234
@@ -238,8 +237,8 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
238 val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y); 237 val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y);
239 writel(val, ctx->addr + DECON_VIDOSDxA(win)); 238 writel(val, ctx->addr + DECON_VIDOSDxA(win));
240 239
241 val = COORDINATE_X(plane->crtc_x + plane->crtc_width - 1) | 240 val = COORDINATE_X(plane->crtc_x + plane->crtc_w - 1) |
242 COORDINATE_Y(plane->crtc_y + plane->crtc_height - 1); 241 COORDINATE_Y(plane->crtc_y + plane->crtc_h - 1);
243 writel(val, ctx->addr + DECON_VIDOSDxB(win)); 242 writel(val, ctx->addr + DECON_VIDOSDxB(win));
244 243
245 val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | 244 val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
@@ -252,14 +251,14 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
252 251
253 writel(plane->dma_addr[0], ctx->addr + DECON_VIDW0xADD0B0(win)); 252 writel(plane->dma_addr[0], ctx->addr + DECON_VIDW0xADD0B0(win));
254 253
255 val = plane->dma_addr[0] + plane->pitch * plane->crtc_height; 254 val = plane->dma_addr[0] + pitch * plane->crtc_h;
256 writel(val, ctx->addr + DECON_VIDW0xADD1B0(win)); 255 writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
257 256
258 val = OFFSIZE(plane->pitch - plane->crtc_width * (plane->bpp >> 3)) 257 val = OFFSIZE(pitch - plane->crtc_w * bpp)
259 | PAGEWIDTH(plane->crtc_width * (plane->bpp >> 3)); 258 | PAGEWIDTH(plane->crtc_w * bpp);
260 writel(val, ctx->addr + DECON_VIDW0xADD2(win)); 259 writel(val, ctx->addr + DECON_VIDW0xADD2(win));
261 260
262 decon_win_set_pixfmt(ctx, win); 261 decon_win_set_pixfmt(ctx, win, state->fb);
263 262
264 /* window enable */ 263 /* window enable */
265 val = readl(ctx->addr + DECON_WINCONx(win)); 264 val = readl(ctx->addr + DECON_WINCONx(win));
@@ -277,17 +276,13 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
277 atomic_set(&ctx->win_updated, 1); 276 atomic_set(&ctx->win_updated, 1);
278} 277}
279 278
280static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win) 279static void decon_disable_plane(struct exynos_drm_crtc *crtc,
280 struct exynos_drm_plane *plane)
281{ 281{
282 struct decon_context *ctx = crtc->ctx; 282 struct decon_context *ctx = crtc->ctx;
283 struct exynos_drm_plane *plane; 283 unsigned int win = plane->zpos;
284 u32 val; 284 u32 val;
285 285
286 if (win < 0 || win >= WINDOWS_NR)
287 return;
288
289 plane = &ctx->planes[win];
290
291 if (ctx->suspended) 286 if (ctx->suspended)
292 return; 287 return;
293 288
@@ -378,7 +373,7 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
378 * a destroyed buffer later. 373 * a destroyed buffer later.
379 */ 374 */
380 for (i = 0; i < WINDOWS_NR; i++) 375 for (i = 0; i < WINDOWS_NR; i++)
381 decon_win_disable(crtc, i); 376 decon_disable_plane(crtc, &ctx->planes[i]);
382 377
383 decon_swreset(ctx); 378 decon_swreset(ctx);
384 379
@@ -407,7 +402,7 @@ void decon_te_irq_handler(struct exynos_drm_crtc *crtc)
407 writel(val, ctx->addr + DECON_TRIGCON); 402 writel(val, ctx->addr + DECON_TRIGCON);
408 } 403 }
409 404
410 drm_handle_vblank(ctx->drm_dev, ctx->pipe); 405 drm_crtc_handle_vblank(&ctx->crtc->base);
411} 406}
412 407
413static void decon_clear_channels(struct exynos_drm_crtc *crtc) 408static void decon_clear_channels(struct exynos_drm_crtc *crtc)
@@ -460,10 +455,9 @@ static struct exynos_drm_crtc_ops decon_crtc_ops = {
460 .enable_vblank = decon_enable_vblank, 455 .enable_vblank = decon_enable_vblank,
461 .disable_vblank = decon_disable_vblank, 456 .disable_vblank = decon_disable_vblank,
462 .commit = decon_commit, 457 .commit = decon_commit,
463 .win_commit = decon_win_commit, 458 .update_plane = decon_update_plane,
464 .win_disable = decon_win_disable, 459 .disable_plane = decon_disable_plane,
465 .te_handler = decon_te_irq_handler, 460 .te_handler = decon_te_irq_handler,
466 .clear_channels = decon_clear_channels,
467}; 461};
468 462
469static int decon_bind(struct device *dev, struct device *master, void *data) 463static int decon_bind(struct device *dev, struct device *master, void *data)
@@ -497,7 +491,9 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
497 goto err; 491 goto err;
498 } 492 }
499 493
500 ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev); 494 decon_clear_channels(ctx->crtc);
495
496 ret = drm_iommu_attach_device(drm_dev, dev);
501 if (ret) 497 if (ret)
502 goto err; 498 goto err;
503 499
@@ -514,8 +510,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
514 decon_disable(ctx->crtc); 510 decon_disable(ctx->crtc);
515 511
516 /* detach this sub driver from iommu mapping if supported. */ 512 /* detach this sub driver from iommu mapping if supported. */
517 if (is_drm_iommu_supported(ctx->drm_dev)) 513 drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
518 drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
519} 514}
520 515
521static const struct component_ops decon_component_ops = { 516static const struct component_ops decon_component_ops = {
@@ -533,7 +528,7 @@ static irqreturn_t decon_vsync_irq_handler(int irq, void *dev_id)
533 528
534 val = readl(ctx->addr + DECON_VIDINTCON1); 529 val = readl(ctx->addr + DECON_VIDINTCON1);
535 if (val & VIDINTCON1_INTFRMPEND) { 530 if (val & VIDINTCON1_INTFRMPEND) {
536 drm_handle_vblank(ctx->drm_dev, ctx->pipe); 531 drm_crtc_handle_vblank(&ctx->crtc->base);
537 532
538 /* clear */ 533 /* clear */
539 writel(VIDINTCON1_INTFRMPEND, ctx->addr + DECON_VIDINTCON1); 534 writel(VIDINTCON1_INTFRMPEND, ctx->addr + DECON_VIDINTCON1);
@@ -553,7 +548,7 @@ static irqreturn_t decon_lcd_sys_irq_handler(int irq, void *dev_id)
553 548
554 val = readl(ctx->addr + DECON_VIDINTCON1); 549 val = readl(ctx->addr + DECON_VIDINTCON1);
555 if (val & VIDINTCON1_INTFRMDONEPEND) { 550 if (val & VIDINTCON1_INTFRMDONEPEND) {
556 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); 551 exynos_drm_crtc_finish_pageflip(ctx->crtc);
557 552
558 /* clear */ 553 /* clear */
559 writel(VIDINTCON1_INTFRMDONEPEND, 554 writel(VIDINTCON1_INTFRMDONEPEND,
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 362532afd1a5..07926547c94f 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -61,7 +61,7 @@ struct decon_context {
61 atomic_t wait_vsync_event; 61 atomic_t wait_vsync_event;
62 62
63 struct exynos_drm_panel_info panel; 63 struct exynos_drm_panel_info panel;
64 struct exynos_drm_display *display; 64 struct drm_encoder *encoder;
65}; 65};
66 66
67static const struct of_device_id decon_driver_dt_match[] = { 67static const struct of_device_id decon_driver_dt_match[] = {
@@ -126,7 +126,9 @@ static int decon_ctx_initialize(struct decon_context *ctx,
126 ctx->drm_dev = drm_dev; 126 ctx->drm_dev = drm_dev;
127 ctx->pipe = priv->pipe++; 127 ctx->pipe = priv->pipe++;
128 128
129 ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, ctx->dev); 129 decon_clear_channels(ctx->crtc);
130
131 ret = drm_iommu_attach_device(drm_dev, ctx->dev);
130 if (ret) 132 if (ret)
131 priv->pipe--; 133 priv->pipe--;
132 134
@@ -136,8 +138,7 @@ static int decon_ctx_initialize(struct decon_context *ctx,
136static void decon_ctx_remove(struct decon_context *ctx) 138static void decon_ctx_remove(struct decon_context *ctx)
137{ 139{
138 /* detach this sub driver from iommu mapping if supported. */ 140 /* detach this sub driver from iommu mapping if supported. */
139 if (is_drm_iommu_supported(ctx->drm_dev)) 141 drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
140 drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
141} 142}
142 143
143static u32 decon_calc_clkdiv(struct decon_context *ctx, 144static u32 decon_calc_clkdiv(struct decon_context *ctx,
@@ -271,16 +272,16 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
271 } 272 }
272} 273}
273 274
274static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win) 275static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
276 struct drm_framebuffer *fb)
275{ 277{
276 struct exynos_drm_plane *plane = &ctx->planes[win];
277 unsigned long val; 278 unsigned long val;
278 int padding; 279 int padding;
279 280
280 val = readl(ctx->regs + WINCON(win)); 281 val = readl(ctx->regs + WINCON(win));
281 val &= ~WINCONx_BPPMODE_MASK; 282 val &= ~WINCONx_BPPMODE_MASK;
282 283
283 switch (plane->pixel_format) { 284 switch (fb->pixel_format) {
284 case DRM_FORMAT_RGB565: 285 case DRM_FORMAT_RGB565:
285 val |= WINCONx_BPPMODE_16BPP_565; 286 val |= WINCONx_BPPMODE_16BPP_565;
286 val |= WINCONx_BURSTLEN_16WORD; 287 val |= WINCONx_BURSTLEN_16WORD;
@@ -329,7 +330,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
329 break; 330 break;
330 } 331 }
331 332
332 DRM_DEBUG_KMS("bpp = %d\n", plane->bpp); 333 DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
333 334
334 /* 335 /*
335 * In case of exynos, setting dma-burst to 16Word causes permanent 336 * In case of exynos, setting dma-burst to 16Word causes permanent
@@ -339,8 +340,8 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
339 * movement causes unstable DMA which results in iommu crash/tear. 340 * movement causes unstable DMA which results in iommu crash/tear.
340 */ 341 */
341 342
342 padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width; 343 padding = (fb->pitches[0] / (fb->bits_per_pixel >> 3)) - fb->width;
343 if (plane->fb_width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) { 344 if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
344 val &= ~WINCONx_BURSTLEN_MASK; 345 val &= ~WINCONx_BURSTLEN_MASK;
345 val |= WINCONx_BURSTLEN_8WORD; 346 val |= WINCONx_BURSTLEN_8WORD;
346 } 347 }
@@ -382,23 +383,19 @@ static void decon_shadow_protect_win(struct decon_context *ctx,
382 writel(val, ctx->regs + SHADOWCON); 383 writel(val, ctx->regs + SHADOWCON);
383} 384}
384 385
385static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) 386static void decon_update_plane(struct exynos_drm_crtc *crtc,
387 struct exynos_drm_plane *plane)
386{ 388{
387 struct decon_context *ctx = crtc->ctx; 389 struct decon_context *ctx = crtc->ctx;
388 struct drm_display_mode *mode = &crtc->base.state->adjusted_mode; 390 struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
389 struct exynos_drm_plane *plane; 391 struct drm_plane_state *state = plane->base.state;
390 int padding; 392 int padding;
391 unsigned long val, alpha; 393 unsigned long val, alpha;
392 unsigned int last_x; 394 unsigned int last_x;
393 unsigned int last_y; 395 unsigned int last_y;
394 396 unsigned int win = plane->zpos;
395 if (ctx->suspended) 397 unsigned int bpp = state->fb->bits_per_pixel >> 3;
396 return; 398 unsigned int pitch = state->fb->pitches[0];
397
398 if (win < 0 || win >= WINDOWS_NR)
399 return;
400
401 plane = &ctx->planes[win];
402 399
403 if (ctx->suspended) 400 if (ctx->suspended)
404 return; 401 return;
@@ -420,11 +417,11 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
420 val = (unsigned long)plane->dma_addr[0]; 417 val = (unsigned long)plane->dma_addr[0];
421 writel(val, ctx->regs + VIDW_BUF_START(win)); 418 writel(val, ctx->regs + VIDW_BUF_START(win));
422 419
423 padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width; 420 padding = (pitch / bpp) - state->fb->width;
424 421
425 /* buffer size */ 422 /* buffer size */
426 writel(plane->fb_width + padding, ctx->regs + VIDW_WHOLE_X(win)); 423 writel(state->fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
427 writel(plane->fb_height, ctx->regs + VIDW_WHOLE_Y(win)); 424 writel(state->fb->height, ctx->regs + VIDW_WHOLE_Y(win));
428 425
429 /* offset from the start of the buffer to read */ 426 /* offset from the start of the buffer to read */
430 writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win)); 427 writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win));
@@ -433,25 +430,25 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
433 DRM_DEBUG_KMS("start addr = 0x%lx\n", 430 DRM_DEBUG_KMS("start addr = 0x%lx\n",
434 (unsigned long)val); 431 (unsigned long)val);
435 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 432 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
436 plane->crtc_width, plane->crtc_height); 433 plane->crtc_w, plane->crtc_h);
437 434
438 /* 435 /*
439 * OSD position. 436 * OSD position.
440 * If the window layout goes beyond the LCD layout, DECON fails. 437 * If the window layout goes beyond the LCD layout, DECON fails.
441 */ 438 */
442 if ((plane->crtc_x + plane->crtc_width) > mode->hdisplay) 439 if ((plane->crtc_x + plane->crtc_w) > mode->hdisplay)
443 plane->crtc_x = mode->hdisplay - plane->crtc_width; 440 plane->crtc_x = mode->hdisplay - plane->crtc_w;
444 if ((plane->crtc_y + plane->crtc_height) > mode->vdisplay) 441 if ((plane->crtc_y + plane->crtc_h) > mode->vdisplay)
445 plane->crtc_y = mode->vdisplay - plane->crtc_height; 442 plane->crtc_y = mode->vdisplay - plane->crtc_h;
446 443
447 val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) | 444 val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) |
448 VIDOSDxA_TOPLEFT_Y(plane->crtc_y); 445 VIDOSDxA_TOPLEFT_Y(plane->crtc_y);
449 writel(val, ctx->regs + VIDOSD_A(win)); 446 writel(val, ctx->regs + VIDOSD_A(win));
450 447
451 last_x = plane->crtc_x + plane->crtc_width; 448 last_x = plane->crtc_x + plane->crtc_w;
452 if (last_x) 449 if (last_x)
453 last_x--; 450 last_x--;
454 last_y = plane->crtc_y + plane->crtc_height; 451 last_y = plane->crtc_y + plane->crtc_h;
455 if (last_y) 452 if (last_y)
456 last_y--; 453 last_y--;
457 454
@@ -475,7 +472,7 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
475 472
476 writel(alpha, ctx->regs + VIDOSD_D(win)); 473 writel(alpha, ctx->regs + VIDOSD_D(win));
477 474
478 decon_win_set_pixfmt(ctx, win); 475 decon_win_set_pixfmt(ctx, win, state->fb);
479 476
480 /* hardware window 0 doesn't support color key. */ 477 /* hardware window 0 doesn't support color key. */
481 if (win != 0) 478 if (win != 0)
@@ -495,17 +492,13 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
495 writel(val, ctx->regs + DECON_UPDATE); 492 writel(val, ctx->regs + DECON_UPDATE);
496} 493}
497 494
498static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win) 495static void decon_disable_plane(struct exynos_drm_crtc *crtc,
496 struct exynos_drm_plane *plane)
499{ 497{
500 struct decon_context *ctx = crtc->ctx; 498 struct decon_context *ctx = crtc->ctx;
501 struct exynos_drm_plane *plane; 499 unsigned int win = plane->zpos;
502 u32 val; 500 u32 val;
503 501
504 if (win < 0 || win >= WINDOWS_NR)
505 return;
506
507 plane = &ctx->planes[win];
508
509 if (ctx->suspended) 502 if (ctx->suspended)
510 return; 503 return;
511 504
@@ -601,7 +594,7 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
601 * a destroyed buffer later. 594 * a destroyed buffer later.
602 */ 595 */
603 for (i = 0; i < WINDOWS_NR; i++) 596 for (i = 0; i < WINDOWS_NR; i++)
604 decon_win_disable(crtc, i); 597 decon_disable_plane(crtc, &ctx->planes[i]);
605 598
606 clk_disable_unprepare(ctx->vclk); 599 clk_disable_unprepare(ctx->vclk);
607 clk_disable_unprepare(ctx->eclk); 600 clk_disable_unprepare(ctx->eclk);
@@ -621,9 +614,8 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
621 .enable_vblank = decon_enable_vblank, 614 .enable_vblank = decon_enable_vblank,
622 .disable_vblank = decon_disable_vblank, 615 .disable_vblank = decon_disable_vblank,
623 .wait_for_vblank = decon_wait_for_vblank, 616 .wait_for_vblank = decon_wait_for_vblank,
624 .win_commit = decon_win_commit, 617 .update_plane = decon_update_plane,
625 .win_disable = decon_win_disable, 618 .disable_plane = decon_disable_plane,
626 .clear_channels = decon_clear_channels,
627}; 619};
628 620
629 621
@@ -643,8 +635,8 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
643 goto out; 635 goto out;
644 636
645 if (!ctx->i80_if) { 637 if (!ctx->i80_if) {
646 drm_handle_vblank(ctx->drm_dev, ctx->pipe); 638 drm_crtc_handle_vblank(&ctx->crtc->base);
647 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); 639 exynos_drm_crtc_finish_pageflip(ctx->crtc);
648 640
649 /* set wait vsync event to zero and wake up queue. */ 641 /* set wait vsync event to zero and wake up queue. */
650 if (atomic_read(&ctx->wait_vsync_event)) { 642 if (atomic_read(&ctx->wait_vsync_event)) {
@@ -689,8 +681,8 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
689 return PTR_ERR(ctx->crtc); 681 return PTR_ERR(ctx->crtc);
690 } 682 }
691 683
692 if (ctx->display) 684 if (ctx->encoder)
693 exynos_drm_create_enc_conn(drm_dev, ctx->display); 685 exynos_dpi_bind(drm_dev, ctx->encoder);
694 686
695 return 0; 687 return 0;
696 688
@@ -703,8 +695,8 @@ static void decon_unbind(struct device *dev, struct device *master,
703 695
704 decon_disable(ctx->crtc); 696 decon_disable(ctx->crtc);
705 697
706 if (ctx->display) 698 if (ctx->encoder)
707 exynos_dpi_remove(ctx->display); 699 exynos_dpi_remove(ctx->encoder);
708 700
709 decon_ctx_remove(ctx); 701 decon_ctx_remove(ctx);
710} 702}
@@ -789,9 +781,9 @@ static int decon_probe(struct platform_device *pdev)
789 781
790 platform_set_drvdata(pdev, ctx); 782 platform_set_drvdata(pdev, ctx);
791 783
792 ctx->display = exynos_dpi_probe(dev); 784 ctx->encoder = exynos_dpi_probe(dev);
793 if (IS_ERR(ctx->display)) { 785 if (IS_ERR(ctx->encoder)) {
794 ret = PTR_ERR(ctx->display); 786 ret = PTR_ERR(ctx->encoder);
795 goto err_iounmap; 787 goto err_iounmap;
796 } 788 }
797 789
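Both DECON variants now derive window parameters from the atomic plane state instead of fields mirrored into exynos_drm_plane; the common derivation, sketched with a hypothetical callback (bits_per_pixel is still a valid struct drm_framebuffer field in this kernel):

static void foo_update_plane(struct exynos_drm_crtc *crtc,
                             struct exynos_drm_plane *plane)
{
        struct drm_framebuffer *fb = plane->base.state->fb;
        unsigned int bpp = fb->bits_per_pixel >> 3;     /* bytes per pixel */
        unsigned int pitch = fb->pitches[0];            /* bytes per scanline */
        unsigned int padding = pitch / bpp - fb->width; /* trailing pixels */

        /* ... program window position/size from plane->crtc_* ... */
}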
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 172b8002a2c8..d66ade0efac8 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -32,19 +32,20 @@
32#include <drm/drm_panel.h> 32#include <drm/drm_panel.h>
33 33
34#include "exynos_dp_core.h" 34#include "exynos_dp_core.h"
35#include "exynos_drm_crtc.h"
35 36
36#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \ 37#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
37 connector) 38 connector)
38 39
39static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp) 40static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
40{ 41{
41 return to_exynos_crtc(dp->encoder->crtc); 42 return to_exynos_crtc(dp->encoder.crtc);
42} 43}
43 44
44static inline struct exynos_dp_device * 45static inline struct exynos_dp_device *encoder_to_dp(
45display_to_dp(struct exynos_drm_display *d) 46 struct drm_encoder *e)
46{ 47{
47 return container_of(d, struct exynos_dp_device, display); 48 return container_of(e, struct exynos_dp_device, encoder);
48} 49}
49 50
50struct bridge_init { 51struct bridge_init {
@@ -795,9 +796,6 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp)
795 /* Configure video slave mode */ 796 /* Configure video slave mode */
796 exynos_dp_enable_video_master(dp, 0); 797 exynos_dp_enable_video_master(dp, 0);
797 798
798 /* Enable video */
799 exynos_dp_start_video(dp);
800
801 timeout_loop = 0; 799 timeout_loop = 0;
802 800
803 for (;;) { 801 for (;;) {
@@ -891,9 +889,9 @@ static void exynos_dp_hotplug(struct work_struct *work)
891 drm_helper_hpd_irq_event(dp->drm_dev); 889 drm_helper_hpd_irq_event(dp->drm_dev);
892} 890}
893 891
894static void exynos_dp_commit(struct exynos_drm_display *display) 892static void exynos_dp_commit(struct drm_encoder *encoder)
895{ 893{
896 struct exynos_dp_device *dp = display_to_dp(display); 894 struct exynos_dp_device *dp = encoder_to_dp(encoder);
897 int ret; 895 int ret;
898 896
899 /* Keep the panel disabled while we configure video */ 897 /* Keep the panel disabled while we configure video */
@@ -938,6 +936,9 @@ static void exynos_dp_commit(struct exynos_drm_display *display)
938 if (drm_panel_enable(dp->panel)) 936 if (drm_panel_enable(dp->panel))
939 DRM_ERROR("failed to enable the panel\n"); 937 DRM_ERROR("failed to enable the panel\n");
940 } 938 }
939
940 /* Enable video */
941 exynos_dp_start_video(dp);
941} 942}
942 943
943static enum drm_connector_status exynos_dp_detect( 944static enum drm_connector_status exynos_dp_detect(
@@ -994,7 +995,7 @@ static struct drm_encoder *exynos_dp_best_encoder(
994{ 995{
995 struct exynos_dp_device *dp = ctx_from_connector(connector); 996 struct exynos_dp_device *dp = ctx_from_connector(connector);
996 997
997 return dp->encoder; 998 return &dp->encoder;
998} 999}
999 1000
1000static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = { 1001static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
@@ -1019,15 +1020,12 @@ static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp,
1019 return 0; 1020 return 0;
1020} 1021}
1021 1022
1022static int exynos_dp_create_connector(struct exynos_drm_display *display, 1023static int exynos_dp_create_connector(struct drm_encoder *encoder)
1023 struct drm_encoder *encoder)
1024{ 1024{
1025 struct exynos_dp_device *dp = display_to_dp(display); 1025 struct exynos_dp_device *dp = encoder_to_dp(encoder);
1026 struct drm_connector *connector = &dp->connector; 1026 struct drm_connector *connector = &dp->connector;
1027 int ret; 1027 int ret;
1028 1028
1029 dp->encoder = encoder;
1030
1031 /* Pre-empt DP connector creation if there's a bridge */ 1029 /* Pre-empt DP connector creation if there's a bridge */
1032 if (dp->bridge) { 1030 if (dp->bridge) {
1033 ret = exynos_drm_attach_lcd_bridge(dp, encoder); 1031 ret = exynos_drm_attach_lcd_bridge(dp, encoder);
@@ -1054,20 +1052,22 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display,
1054 return ret; 1052 return ret;
1055} 1053}
1056 1054
1057static void exynos_dp_phy_init(struct exynos_dp_device *dp) 1055static bool exynos_dp_mode_fixup(struct drm_encoder *encoder,
1056 const struct drm_display_mode *mode,
1057 struct drm_display_mode *adjusted_mode)
1058{ 1058{
1059 if (dp->phy) 1059 return true;
1060 phy_power_on(dp->phy);
1061} 1060}
1062 1061
1063static void exynos_dp_phy_exit(struct exynos_dp_device *dp) 1062static void exynos_dp_mode_set(struct drm_encoder *encoder,
1063 struct drm_display_mode *mode,
1064 struct drm_display_mode *adjusted_mode)
1064{ 1065{
1065 if (dp->phy)
1066 phy_power_off(dp->phy);
1067} 1066}
1068 1067
1069static void exynos_dp_poweron(struct exynos_dp_device *dp) 1068static void exynos_dp_enable(struct drm_encoder *encoder)
1070{ 1069{
1070 struct exynos_dp_device *dp = encoder_to_dp(encoder);
1071 struct exynos_drm_crtc *crtc = dp_to_crtc(dp); 1071 struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
1072 1072
1073 if (dp->dpms_mode == DRM_MODE_DPMS_ON) 1073 if (dp->dpms_mode == DRM_MODE_DPMS_ON)
@@ -1084,14 +1084,17 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
1084 crtc->ops->clock_enable(dp_to_crtc(dp), true); 1084 crtc->ops->clock_enable(dp_to_crtc(dp), true);
1085 1085
1086 clk_prepare_enable(dp->clock); 1086 clk_prepare_enable(dp->clock);
1087 exynos_dp_phy_init(dp); 1087 phy_power_on(dp->phy);
1088 exynos_dp_init_dp(dp); 1088 exynos_dp_init_dp(dp);
1089 enable_irq(dp->irq); 1089 enable_irq(dp->irq);
1090 exynos_dp_commit(&dp->display); 1090 exynos_dp_commit(&dp->encoder);
1091
1092 dp->dpms_mode = DRM_MODE_DPMS_ON;
1091} 1093}
1092 1094
1093static void exynos_dp_poweroff(struct exynos_dp_device *dp) 1095static void exynos_dp_disable(struct drm_encoder *encoder)
1094{ 1096{
1097 struct exynos_dp_device *dp = encoder_to_dp(encoder);
1095 struct exynos_drm_crtc *crtc = dp_to_crtc(dp); 1098 struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
1096 1099
1097 if (dp->dpms_mode != DRM_MODE_DPMS_ON) 1100 if (dp->dpms_mode != DRM_MODE_DPMS_ON)
@@ -1106,7 +1109,7 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
1106 1109
1107 disable_irq(dp->irq); 1110 disable_irq(dp->irq);
1108 flush_work(&dp->hotplug_work); 1111 flush_work(&dp->hotplug_work);
1109 exynos_dp_phy_exit(dp); 1112 phy_power_off(dp->phy);
1110 clk_disable_unprepare(dp->clock); 1113 clk_disable_unprepare(dp->clock);
1111 1114
1112 if (crtc->ops->clock_enable) 1115 if (crtc->ops->clock_enable)
@@ -1116,31 +1119,19 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
1116 if (drm_panel_unprepare(dp->panel)) 1119 if (drm_panel_unprepare(dp->panel))
1117 DRM_ERROR("failed to turnoff the panel\n"); 1120 DRM_ERROR("failed to turnoff the panel\n");
1118 } 1121 }
1119}
1120
1121static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
1122{
1123 struct exynos_dp_device *dp = display_to_dp(display);
1124 1122
1125 switch (mode) { 1123 dp->dpms_mode = DRM_MODE_DPMS_OFF;
1126 case DRM_MODE_DPMS_ON:
1127 exynos_dp_poweron(dp);
1128 break;
1129 case DRM_MODE_DPMS_STANDBY:
1130 case DRM_MODE_DPMS_SUSPEND:
1131 case DRM_MODE_DPMS_OFF:
1132 exynos_dp_poweroff(dp);
1133 break;
1134 default:
1135 break;
1136 }
1137 dp->dpms_mode = mode;
1138} 1124}
1139 1125
1140static struct exynos_drm_display_ops exynos_dp_display_ops = { 1126static struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
1141 .create_connector = exynos_dp_create_connector, 1127 .mode_fixup = exynos_dp_mode_fixup,
1142 .dpms = exynos_dp_dpms, 1128 .mode_set = exynos_dp_mode_set,
1143 .commit = exynos_dp_commit, 1129 .enable = exynos_dp_enable,
1130 .disable = exynos_dp_disable,
1131};
1132
1133static struct drm_encoder_funcs exynos_dp_encoder_funcs = {
1134 .destroy = drm_encoder_cleanup,
1144}; 1135};
1145 1136
1146static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev) 1137static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
@@ -1219,9 +1210,10 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
1219 struct exynos_dp_device *dp = dev_get_drvdata(dev); 1210 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1220 struct platform_device *pdev = to_platform_device(dev); 1211 struct platform_device *pdev = to_platform_device(dev);
1221 struct drm_device *drm_dev = data; 1212 struct drm_device *drm_dev = data;
1213 struct drm_encoder *encoder = &dp->encoder;
1222 struct resource *res; 1214 struct resource *res;
1223 unsigned int irq_flags; 1215 unsigned int irq_flags;
1224 int ret = 0; 1216 int pipe, ret = 0;
1225 1217
1226 dp->dev = &pdev->dev; 1218 dp->dev = &pdev->dev;
1227 dp->dpms_mode = DRM_MODE_DPMS_OFF; 1219 dp->dpms_mode = DRM_MODE_DPMS_OFF;
@@ -1297,7 +1289,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
1297 1289
1298 INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug); 1290 INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
1299 1291
1300 exynos_dp_phy_init(dp); 1292 phy_power_on(dp->phy);
1301 1293
1302 exynos_dp_init_dp(dp); 1294 exynos_dp_init_dp(dp);
1303 1295
@@ -1311,7 +1303,28 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
1311 1303
1312 dp->drm_dev = drm_dev; 1304 dp->drm_dev = drm_dev;
1313 1305
1314 return exynos_drm_create_enc_conn(drm_dev, &dp->display); 1306 pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
1307 EXYNOS_DISPLAY_TYPE_LCD);
1308 if (pipe < 0)
1309 return pipe;
1310
1311 encoder->possible_crtcs = 1 << pipe;
1312
1313 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
1314
1315 drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
1316 DRM_MODE_ENCODER_TMDS);
1317
1318 drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
1319
1320 ret = exynos_dp_create_connector(encoder);
1321 if (ret) {
1322 DRM_ERROR("failed to create connector ret = %d\n", ret);
1323 drm_encoder_cleanup(encoder);
1324 return ret;
1325 }
1326
1327 return 0;
1315} 1328}
1316 1329
1317static void exynos_dp_unbind(struct device *dev, struct device *master, 1330static void exynos_dp_unbind(struct device *dev, struct device *master,
@@ -1319,7 +1332,7 @@ static void exynos_dp_unbind(struct device *dev, struct device *master,
1319{ 1332{
1320 struct exynos_dp_device *dp = dev_get_drvdata(dev); 1333 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1321 1334
1322 exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF); 1335 exynos_dp_disable(&dp->encoder);
1323} 1336}
1324 1337
1325static const struct component_ops exynos_dp_ops = { 1338static const struct component_ops exynos_dp_ops = {
@@ -1338,8 +1351,6 @@ static int exynos_dp_probe(struct platform_device *pdev)
1338 if (!dp) 1351 if (!dp)
1339 return -ENOMEM; 1352 return -ENOMEM;
1340 1353
1341 dp->display.type = EXYNOS_DISPLAY_TYPE_LCD;
1342 dp->display.ops = &exynos_dp_display_ops;
1343 platform_set_drvdata(pdev, dp); 1354 platform_set_drvdata(pdev, dp);
1344 1355
1345 panel_node = of_parse_phandle(dev->of_node, "panel", 0); 1356 panel_node = of_parse_phandle(dev->of_node, "panel", 0);
@@ -1377,7 +1388,7 @@ static int exynos_dp_suspend(struct device *dev)
1377{ 1388{
1378 struct exynos_dp_device *dp = dev_get_drvdata(dev); 1389 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1379 1390
1380 exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF); 1391 exynos_dp_disable(&dp->encoder);
1381 return 0; 1392 return 0;
1382} 1393}
1383 1394
@@ -1385,7 +1396,7 @@ static int exynos_dp_resume(struct device *dev)
1385{ 1396{
1386 struct exynos_dp_device *dp = dev_get_drvdata(dev); 1397 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1387 1398
1388 exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_ON); 1399 exynos_dp_enable(&dp->encoder);
1389 return 0; 1400 return 0;
1390} 1401}
1391#endif 1402#endif
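
The ->dpms() switch removed above collapses into the pair of encoder helper
callbacks registered in the hunks before it; a rough sketch of the mapping,
assuming the usual DRM helper semantics (illustrative comment only, not
taken from this patch):

	/*
	 * Old ->dpms() mode                     New encoder helper callback
	 * DRM_MODE_DPMS_ON                   -> .enable()
	 * DRM_MODE_DPMS_STANDBY/SUSPEND/OFF  -> .disable()
	 *
	 * The dp->dpms_mode bookkeeping therefore moves into
	 * exynos_dp_enable() and exynos_dp_disable(), as shown above.
	 */
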
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index a4e799679669..e413b6f7b0e7 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -147,11 +147,10 @@ struct link_train {
147}; 147};
148 148
149struct exynos_dp_device { 149struct exynos_dp_device {
150 struct exynos_drm_display display; 150 struct drm_encoder encoder;
151 struct device *dev; 151 struct device *dev;
152 struct drm_device *drm_dev; 152 struct drm_device *drm_dev;
153 struct drm_connector connector; 153 struct drm_connector connector;
154 struct drm_encoder *encoder;
155 struct drm_panel *panel; 154 struct drm_panel *panel;
156 struct drm_bridge *bridge; 155 struct drm_bridge *bridge;
157 struct clk *clock; 156 struct clk *clock;
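
Embedding the drm_encoder directly in the device struct, as the hunk above
does, is what makes the encoder_to_dp() conversions elsewhere in this patch
work; a minimal sketch of the pattern (assumed shape, mirroring the
encoder_to_dpi() helper visible further down in this diff):

	static inline struct exynos_dp_device *
	encoder_to_dp(struct drm_encoder *e)
	{
		/* Recover the containing device from its embedded encoder. */
		return container_of(e, struct exynos_dp_device, encoder);
	}
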
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
deleted file mode 100644
index 24994ba10e28..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ /dev/null
@@ -1,186 +0,0 @@
1/* exynos_drm_buf.c
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <drm/drmP.h>
13#include <drm/exynos_drm.h>
14
15#include "exynos_drm_drv.h"
16#include "exynos_drm_gem.h"
17#include "exynos_drm_buf.h"
18#include "exynos_drm_iommu.h"
19
20static int lowlevel_buffer_allocate(struct drm_device *dev,
21 unsigned int flags, struct exynos_drm_gem_buf *buf)
22{
23 int ret = 0;
24 enum dma_attr attr;
25 unsigned int nr_pages;
26
27 if (buf->dma_addr) {
28 DRM_DEBUG_KMS("already allocated.\n");
29 return 0;
30 }
31
32 init_dma_attrs(&buf->dma_attrs);
33
34 /*
35 * If EXYNOS_BO_CONTIG is set, a fully physically contiguous memory
36 * region is allocated; otherwise the allocation is made as
37 * physically contiguous as possible.
38 */
39 if (!(flags & EXYNOS_BO_NONCONTIG))
40 dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
41
42 /*
43 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is set, use a
44 * write-combine mapping; otherwise use a cacheable mapping.
45 */
46 if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
47 attr = DMA_ATTR_WRITE_COMBINE;
48 else
49 attr = DMA_ATTR_NON_CONSISTENT;
50
51 dma_set_attr(attr, &buf->dma_attrs);
52 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
53
54 nr_pages = buf->size >> PAGE_SHIFT;
55
56 if (!is_drm_iommu_supported(dev)) {
57 dma_addr_t start_addr;
58 unsigned int i = 0;
59
60 buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
61 if (!buf->pages) {
62 DRM_ERROR("failed to allocate pages.\n");
63 return -ENOMEM;
64 }
65
66 buf->cookie = dma_alloc_attrs(dev->dev,
67 buf->size,
68 &buf->dma_addr, GFP_KERNEL,
69 &buf->dma_attrs);
70 if (!buf->cookie) {
71 DRM_ERROR("failed to allocate buffer.\n");
72 ret = -ENOMEM;
73 goto err_free;
74 }
75
76 start_addr = buf->dma_addr;
77 while (i < nr_pages) {
78 buf->pages[i] = phys_to_page(start_addr);
79 start_addr += PAGE_SIZE;
80 i++;
81 }
82 } else {
83
84 buf->pages = dma_alloc_attrs(dev->dev, buf->size,
85 &buf->dma_addr, GFP_KERNEL,
86 &buf->dma_attrs);
87 if (!buf->pages) {
88 DRM_ERROR("failed to allocate buffer.\n");
89 return -ENOMEM;
90 }
91 }
92
93 buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
94 if (IS_ERR(buf->sgt)) {
95 DRM_ERROR("failed to get sg table.\n");
96 ret = PTR_ERR(buf->sgt);
97 goto err_free_attrs;
98 }
99
100 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
101 (unsigned long)buf->dma_addr,
102 buf->size);
103
104 return ret;
105
106err_free_attrs:
107 dma_free_attrs(dev->dev, buf->size, buf->pages,
108 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
109 buf->dma_addr = (dma_addr_t)NULL;
110err_free:
111 if (!is_drm_iommu_supported(dev))
112 drm_free_large(buf->pages);
113
114 return ret;
115}
116
117static void lowlevel_buffer_deallocate(struct drm_device *dev,
118 unsigned int flags, struct exynos_drm_gem_buf *buf)
119{
120 if (!buf->dma_addr) {
121 DRM_DEBUG_KMS("dma_addr is invalid.\n");
122 return;
123 }
124
125 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
126 (unsigned long)buf->dma_addr,
127 buf->size);
128
129 sg_free_table(buf->sgt);
130
131 kfree(buf->sgt);
132 buf->sgt = NULL;
133
134 if (!is_drm_iommu_supported(dev)) {
135 dma_free_attrs(dev->dev, buf->size, buf->cookie,
136 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
137 drm_free_large(buf->pages);
138 } else
139 dma_free_attrs(dev->dev, buf->size, buf->pages,
140 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
141
142 buf->dma_addr = (dma_addr_t)NULL;
143}
144
145struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
146 unsigned int size)
147{
148 struct exynos_drm_gem_buf *buffer;
149
150 DRM_DEBUG_KMS("desired size = 0x%x\n", size);
151
152 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
153 if (!buffer)
154 return NULL;
155
156 buffer->size = size;
157 return buffer;
158}
159
160void exynos_drm_fini_buf(struct drm_device *dev,
161 struct exynos_drm_gem_buf *buffer)
162{
163 kfree(buffer);
164 buffer = NULL;
165}
166
167int exynos_drm_alloc_buf(struct drm_device *dev,
168 struct exynos_drm_gem_buf *buf, unsigned int flags)
169{
170
171 /*
172 * Allocate a memory region and store its addresses in the
173 * buffer object's vaddr and dma_addr fields.
174 */
175 if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
176 return -ENOMEM;
177
178 return 0;
179}
180
181void exynos_drm_free_buf(struct drm_device *dev,
182 unsigned int flags, struct exynos_drm_gem_buf *buffer)
183{
184
185 lowlevel_buffer_deallocate(dev, flags, buffer);
186}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
deleted file mode 100644
index a6412f19673c..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/* exynos_drm_buf.h
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#ifndef _EXYNOS_DRM_BUF_H_
13#define _EXYNOS_DRM_BUF_H_
14
15/* create and initialize buffer object. */
16struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
17 unsigned int size);
18
19/* destroy buffer object. */
20void exynos_drm_fini_buf(struct drm_device *dev,
21 struct exynos_drm_gem_buf *buffer);
22
23/* allocate physical memory region and setup sgt. */
24int exynos_drm_alloc_buf(struct drm_device *dev,
25 struct exynos_drm_gem_buf *buf,
26 unsigned int flags);
27
28/* release physical memory region, and sgt. */
29void exynos_drm_free_buf(struct drm_device *dev,
30 unsigned int flags,
31 struct exynos_drm_gem_buf *buffer);
32
33#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 4c9f972eaa07..c68a6a2a9b57 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -15,46 +15,10 @@
15#include <drm/drmP.h> 15#include <drm/drmP.h>
16#include "exynos_drm_drv.h" 16#include "exynos_drm_drv.h"
17#include "exynos_drm_crtc.h" 17#include "exynos_drm_crtc.h"
18#include "exynos_drm_encoder.h"
19#include "exynos_drm_fbdev.h" 18#include "exynos_drm_fbdev.h"
20 19
21static LIST_HEAD(exynos_drm_subdrv_list); 20static LIST_HEAD(exynos_drm_subdrv_list);
22 21
23int exynos_drm_create_enc_conn(struct drm_device *dev,
24 struct exynos_drm_display *display)
25{
26 struct drm_encoder *encoder;
27 int ret;
28 unsigned long possible_crtcs = 0;
29
30 ret = exynos_drm_crtc_get_pipe_from_type(dev, display->type);
31 if (ret < 0)
32 return ret;
33
34 possible_crtcs |= 1 << ret;
35
36 /* create and initialize a encoder for this sub driver. */
37 encoder = exynos_drm_encoder_create(dev, display, possible_crtcs);
38 if (!encoder) {
39 DRM_ERROR("failed to create encoder\n");
40 return -EFAULT;
41 }
42
43 display->encoder = encoder;
44
45 ret = display->ops->create_connector(display, encoder);
46 if (ret) {
47 DRM_ERROR("failed to create connector ret = %d\n", ret);
48 goto err_destroy_encoder;
49 }
50
51 return 0;
52
53err_destroy_encoder:
54 encoder->funcs->destroy(encoder);
55 return ret;
56}
57
58int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv) 22int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
59{ 23{
60 if (!subdrv) 24 if (!subdrv)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 1610757230a5..c47899738eb4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -19,7 +19,6 @@
19 19
20#include "exynos_drm_crtc.h" 20#include "exynos_drm_crtc.h"
21#include "exynos_drm_drv.h" 21#include "exynos_drm_drv.h"
22#include "exynos_drm_encoder.h"
23#include "exynos_drm_plane.h" 22#include "exynos_drm_plane.h"
24 23
25static void exynos_drm_crtc_enable(struct drm_crtc *crtc) 24static void exynos_drm_crtc_enable(struct drm_crtc *crtc)
@@ -177,7 +176,7 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
177 return -EPERM; 176 return -EPERM;
178 177
179 if (exynos_crtc->ops->enable_vblank) 178 if (exynos_crtc->ops->enable_vblank)
180 exynos_crtc->ops->enable_vblank(exynos_crtc); 179 return exynos_crtc->ops->enable_vblank(exynos_crtc);
181 180
182 return 0; 181 return 0;
183} 182}
@@ -195,24 +194,22 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
195 exynos_crtc->ops->disable_vblank(exynos_crtc); 194 exynos_crtc->ops->disable_vblank(exynos_crtc);
196} 195}
197 196
198void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe) 197void exynos_drm_crtc_finish_pageflip(struct exynos_drm_crtc *exynos_crtc)
199{ 198{
200 struct exynos_drm_private *dev_priv = dev->dev_private; 199 struct drm_crtc *crtc = &exynos_crtc->base;
201 struct drm_crtc *drm_crtc = dev_priv->crtc[pipe];
202 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
203 unsigned long flags; 200 unsigned long flags;
204 201
205 spin_lock_irqsave(&dev->event_lock, flags); 202 spin_lock_irqsave(&crtc->dev->event_lock, flags);
206 if (exynos_crtc->event) { 203 if (exynos_crtc->event) {
207 204
208 drm_send_vblank_event(dev, -1, exynos_crtc->event); 205 drm_crtc_send_vblank_event(crtc, exynos_crtc->event);
209 drm_vblank_put(dev, pipe); 206 drm_crtc_vblank_put(crtc);
210 wake_up(&exynos_crtc->pending_flip_queue); 207 wake_up(&exynos_crtc->pending_flip_queue);
211 208
212 } 209 }
213 210
214 exynos_crtc->event = NULL; 211 exynos_crtc->event = NULL;
215 spin_unlock_irqrestore(&dev->event_lock, flags); 212 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
216} 213}
217 214
218void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb) 215void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
@@ -239,7 +236,7 @@ void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
239} 236}
240 237
241int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, 238int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
242 unsigned int out_type) 239 enum exynos_drm_output_type out_type)
243{ 240{
244 struct drm_crtc *crtc; 241 struct drm_crtc *crtc;
245 242
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 0f3aa70818e3..9e7027d6c2f6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -25,12 +25,12 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
25 void *context); 25 void *context);
26int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe); 26int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
27void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe); 27void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
28void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe); 28void exynos_drm_crtc_finish_pageflip(struct exynos_drm_crtc *exynos_crtc);
29void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb); 29void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
30 30
31/* This function returns the pipe of the crtc device matching out_type. */ 31/* This function returns the pipe of the crtc device matching out_type. */
32int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, 32int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
33 unsigned int out_type); 33 enum exynos_drm_output_type out_type);
34 34
35/* 35/*
36 * This function calls the crtc device (manager)'s te_handler() callback 36 * This function calls the crtc device (manager)'s te_handler() callback
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
deleted file mode 100644
index cd485c091b30..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ /dev/null
@@ -1,286 +0,0 @@
1/* exynos_drm_dmabuf.c
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <drm/drmP.h>
13#include <drm/exynos_drm.h>
14#include "exynos_drm_dmabuf.h"
15#include "exynos_drm_drv.h"
16#include "exynos_drm_gem.h"
17
18#include <linux/dma-buf.h>
19
20struct exynos_drm_dmabuf_attachment {
21 struct sg_table sgt;
22 enum dma_data_direction dir;
23 bool is_mapped;
24};
25
26static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
27{
28 return to_exynos_gem_obj(buf->priv);
29}
30
31static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
32 struct device *dev,
33 struct dma_buf_attachment *attach)
34{
35 struct exynos_drm_dmabuf_attachment *exynos_attach;
36
37 exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
38 if (!exynos_attach)
39 return -ENOMEM;
40
41 exynos_attach->dir = DMA_NONE;
42 attach->priv = exynos_attach;
43
44 return 0;
45}
46
47static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
48 struct dma_buf_attachment *attach)
49{
50 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
51 struct sg_table *sgt;
52
53 if (!exynos_attach)
54 return;
55
56 sgt = &exynos_attach->sgt;
57
58 if (exynos_attach->dir != DMA_NONE)
59 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
60 exynos_attach->dir);
61
62 sg_free_table(sgt);
63 kfree(exynos_attach);
64 attach->priv = NULL;
65}
66
67static struct sg_table *
68 exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
69 enum dma_data_direction dir)
70{
71 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
72 struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
73 struct drm_device *dev = gem_obj->base.dev;
74 struct exynos_drm_gem_buf *buf;
75 struct scatterlist *rd, *wr;
76 struct sg_table *sgt = NULL;
77 unsigned int i;
78 int nents, ret;
79
80 /* just return current sgt if already requested. */
81 if (exynos_attach->dir == dir && exynos_attach->is_mapped)
82 return &exynos_attach->sgt;
83
84 buf = gem_obj->buffer;
85 if (!buf) {
86 DRM_ERROR("buffer is null.\n");
87 return ERR_PTR(-ENOMEM);
88 }
89
90 sgt = &exynos_attach->sgt;
91
92 ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
93 if (ret) {
94 DRM_ERROR("failed to alloc sgt.\n");
95 return ERR_PTR(-ENOMEM);
96 }
97
98 mutex_lock(&dev->struct_mutex);
99
100 rd = buf->sgt->sgl;
101 wr = sgt->sgl;
102 for (i = 0; i < sgt->orig_nents; ++i) {
103 sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
104 rd = sg_next(rd);
105 wr = sg_next(wr);
106 }
107
108 if (dir != DMA_NONE) {
109 nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
110 if (!nents) {
111 DRM_ERROR("failed to map sgl with iommu.\n");
112 sg_free_table(sgt);
113 sgt = ERR_PTR(-EIO);
114 goto err_unlock;
115 }
116 }
117
118 exynos_attach->is_mapped = true;
119 exynos_attach->dir = dir;
120 attach->priv = exynos_attach;
121
122 DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
123
124err_unlock:
125 mutex_unlock(&dev->struct_mutex);
126 return sgt;
127}
128
129static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
130 struct sg_table *sgt,
131 enum dma_data_direction dir)
132{
133 /* Nothing to do. */
134}
135
136static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
137 unsigned long page_num)
138{
139 /* TODO */
140
141 return NULL;
142}
143
144static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
145 unsigned long page_num,
146 void *addr)
147{
148 /* TODO */
149}
150
151static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
152 unsigned long page_num)
153{
154 /* TODO */
155
156 return NULL;
157}
158
159static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
160 unsigned long page_num, void *addr)
161{
162 /* TODO */
163}
164
165static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
166 struct vm_area_struct *vma)
167{
168 return -ENOTTY;
169}
170
171static struct dma_buf_ops exynos_dmabuf_ops = {
172 .attach = exynos_gem_attach_dma_buf,
173 .detach = exynos_gem_detach_dma_buf,
174 .map_dma_buf = exynos_gem_map_dma_buf,
175 .unmap_dma_buf = exynos_gem_unmap_dma_buf,
176 .kmap = exynos_gem_dmabuf_kmap,
177 .kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
178 .kunmap = exynos_gem_dmabuf_kunmap,
179 .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
180 .mmap = exynos_gem_dmabuf_mmap,
181 .release = drm_gem_dmabuf_release,
182};
183
184struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
185 struct drm_gem_object *obj, int flags)
186{
187 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
188 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
189
190 exp_info.ops = &exynos_dmabuf_ops;
191 exp_info.size = exynos_gem_obj->base.size;
192 exp_info.flags = flags;
193 exp_info.priv = obj;
194
195 return dma_buf_export(&exp_info);
196}
197
198struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
199 struct dma_buf *dma_buf)
200{
201 struct dma_buf_attachment *attach;
202 struct sg_table *sgt;
203 struct scatterlist *sgl;
204 struct exynos_drm_gem_obj *exynos_gem_obj;
205 struct exynos_drm_gem_buf *buffer;
206 int ret;
207
208 /* is this one of our own objects? */
209 if (dma_buf->ops == &exynos_dmabuf_ops) {
210 struct drm_gem_object *obj;
211
212 obj = dma_buf->priv;
213
214 /* is it from our device? */
215 if (obj->dev == drm_dev) {
216 /*
217 * Importing a dmabuf exported from our own gem increases the
218 * refcount on the gem itself instead of the f_count of the dmabuf.
219 */
220 drm_gem_object_reference(obj);
221 return obj;
222 }
223 }
224
225 attach = dma_buf_attach(dma_buf, drm_dev->dev);
226 if (IS_ERR(attach))
227 return ERR_PTR(-EINVAL);
228
229 get_dma_buf(dma_buf);
230
231 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
232 if (IS_ERR(sgt)) {
233 ret = PTR_ERR(sgt);
234 goto err_buf_detach;
235 }
236
237 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
238 if (!buffer) {
239 ret = -ENOMEM;
240 goto err_unmap_attach;
241 }
242
243 exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
244 if (!exynos_gem_obj) {
245 ret = -ENOMEM;
246 goto err_free_buffer;
247 }
248
249 sgl = sgt->sgl;
250
251 buffer->size = dma_buf->size;
252 buffer->dma_addr = sg_dma_address(sgl);
253
254 if (sgt->nents == 1) {
255 /* always physically contiguous memory if sgt->nents is 1. */
256 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
257 } else {
258 /*
259 * This case could be either CONTIG or NONCONTIG; for now
260 * assume NONCONTIG.
261 * TODO: find a way for the exporter to notify the importer
262 * of its buffer's type.
263 */
264 exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
265 }
266
267 exynos_gem_obj->buffer = buffer;
268 buffer->sgt = sgt;
269 exynos_gem_obj->base.import_attach = attach;
270
271 DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
272 buffer->size);
273
274 return &exynos_gem_obj->base;
275
276err_free_buffer:
277 kfree(buffer);
278 buffer = NULL;
279err_unmap_attach:
280 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
281err_buf_detach:
282 dma_buf_detach(dma_buf, attach);
283 dma_buf_put(dma_buf);
284
285 return ERR_PTR(ret);
286}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
deleted file mode 100644
index 886de9ff484d..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/* exynos_drm_dmabuf.h
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#ifndef _EXYNOS_DRM_DMABUF_H_
13#define _EXYNOS_DRM_DMABUF_H_
14
15struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
16 struct drm_gem_object *obj, int flags);
17
18struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
19 struct dma_buf *dma_buf);
20#endif
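
The deleted import path above duplicated logic that the core PRIME helpers
already provide; a simplified sketch of the self-import fast path inside
drm_gem_prime_import() (drm_gem_prime_dmabuf_ops is internal to drm_prime.c
and is named here only for illustration):

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = dma_buf->priv;

		if (obj->dev == dev) {
			/* Importing our own buffer: take a GEM reference
			 * instead of pinning the dma_buf's f_count. */
			drm_gem_object_reference(obj);
			return obj;
		}
	}
	/* ...otherwise attach and map, much like the removed code above. */
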
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 7cb6595c1894..c748b8790de3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -20,26 +20,24 @@
20#include <video/of_videomode.h> 20#include <video/of_videomode.h>
21#include <video/videomode.h> 21#include <video/videomode.h>
22 22
23#include "exynos_drm_drv.h" 23#include "exynos_drm_crtc.h"
24 24
25struct exynos_dpi { 25struct exynos_dpi {
26 struct exynos_drm_display display; 26 struct drm_encoder encoder;
27 struct device *dev; 27 struct device *dev;
28 struct device_node *panel_node; 28 struct device_node *panel_node;
29 29
30 struct drm_panel *panel; 30 struct drm_panel *panel;
31 struct drm_connector connector; 31 struct drm_connector connector;
32 struct drm_encoder *encoder;
33 32
34 struct videomode *vm; 33 struct videomode *vm;
35 int dpms_mode;
36}; 34};
37 35
38#define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector) 36#define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector)
39 37
40static inline struct exynos_dpi *display_to_dpi(struct exynos_drm_display *d) 38static inline struct exynos_dpi *encoder_to_dpi(struct drm_encoder *e)
41{ 39{
42 return container_of(d, struct exynos_dpi, display); 40 return container_of(e, struct exynos_dpi, encoder);
43} 41}
44 42
45static enum drm_connector_status 43static enum drm_connector_status
@@ -99,7 +97,7 @@ exynos_dpi_best_encoder(struct drm_connector *connector)
99{ 97{
100 struct exynos_dpi *ctx = connector_to_dpi(connector); 98 struct exynos_dpi *ctx = connector_to_dpi(connector);
101 99
102 return ctx->encoder; 100 return &ctx->encoder;
103} 101}
104 102
105static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = { 103static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
@@ -107,15 +105,12 @@ static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
107 .best_encoder = exynos_dpi_best_encoder, 105 .best_encoder = exynos_dpi_best_encoder,
108}; 106};
109 107
110static int exynos_dpi_create_connector(struct exynos_drm_display *display, 108static int exynos_dpi_create_connector(struct drm_encoder *encoder)
111 struct drm_encoder *encoder)
112{ 109{
113 struct exynos_dpi *ctx = display_to_dpi(display); 110 struct exynos_dpi *ctx = encoder_to_dpi(encoder);
114 struct drm_connector *connector = &ctx->connector; 111 struct drm_connector *connector = &ctx->connector;
115 int ret; 112 int ret;
116 113
117 ctx->encoder = encoder;
118
119 connector->polled = DRM_CONNECTOR_POLL_HPD; 114 connector->polled = DRM_CONNECTOR_POLL_HPD;
120 115
121 ret = drm_connector_init(encoder->dev, connector, 116 ret = drm_connector_init(encoder->dev, connector,
@@ -133,46 +128,48 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display,
133 return 0; 128 return 0;
134} 129}
135 130
136static void exynos_dpi_poweron(struct exynos_dpi *ctx) 131static bool exynos_dpi_mode_fixup(struct drm_encoder *encoder,
132 const struct drm_display_mode *mode,
133 struct drm_display_mode *adjusted_mode)
134{
135 return true;
136}
137
138static void exynos_dpi_mode_set(struct drm_encoder *encoder,
139 struct drm_display_mode *mode,
140 struct drm_display_mode *adjusted_mode)
137{ 141{
142}
143
144static void exynos_dpi_enable(struct drm_encoder *encoder)
145{
146 struct exynos_dpi *ctx = encoder_to_dpi(encoder);
147
138 if (ctx->panel) { 148 if (ctx->panel) {
139 drm_panel_prepare(ctx->panel); 149 drm_panel_prepare(ctx->panel);
140 drm_panel_enable(ctx->panel); 150 drm_panel_enable(ctx->panel);
141 } 151 }
142} 152}
143 153
144static void exynos_dpi_poweroff(struct exynos_dpi *ctx) 154static void exynos_dpi_disable(struct drm_encoder *encoder)
145{ 155{
156 struct exynos_dpi *ctx = encoder_to_dpi(encoder);
157
146 if (ctx->panel) { 158 if (ctx->panel) {
147 drm_panel_disable(ctx->panel); 159 drm_panel_disable(ctx->panel);
148 drm_panel_unprepare(ctx->panel); 160 drm_panel_unprepare(ctx->panel);
149 } 161 }
150} 162}
151 163
152static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode) 164static struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
153{ 165 .mode_fixup = exynos_dpi_mode_fixup,
154 struct exynos_dpi *ctx = display_to_dpi(display); 166 .mode_set = exynos_dpi_mode_set,
155 167 .enable = exynos_dpi_enable,
156 switch (mode) { 168 .disable = exynos_dpi_disable,
157 case DRM_MODE_DPMS_ON: 169};
158 if (ctx->dpms_mode != DRM_MODE_DPMS_ON)
159 exynos_dpi_poweron(ctx);
160 break;
161 case DRM_MODE_DPMS_STANDBY:
162 case DRM_MODE_DPMS_SUSPEND:
163 case DRM_MODE_DPMS_OFF:
164 if (ctx->dpms_mode == DRM_MODE_DPMS_ON)
165 exynos_dpi_poweroff(ctx);
166 break;
167 default:
168 break;
169 }
170 ctx->dpms_mode = mode;
171}
172 170
173static struct exynos_drm_display_ops exynos_dpi_display_ops = { 171static struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
174 .create_connector = exynos_dpi_create_connector, 172 .destroy = drm_encoder_cleanup,
175 .dpms = exynos_dpi_dpms
176}; 173};
177 174
178/* of_* functions will be removed after merge of of_graph patches */ 175/* of_* functions will be removed after merge of of_graph patches */
@@ -299,7 +296,34 @@ static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
299 return 0; 296 return 0;
300} 297}
301 298
302struct exynos_drm_display *exynos_dpi_probe(struct device *dev) 299int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
300{
301 int ret;
302
303 ret = exynos_drm_crtc_get_pipe_from_type(dev, EXYNOS_DISPLAY_TYPE_LCD);
304 if (ret < 0)
305 return ret;
306
307 encoder->possible_crtcs = 1 << ret;
308
309 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
310
311 drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
312 DRM_MODE_ENCODER_TMDS);
313
314 drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
315
316 ret = exynos_dpi_create_connector(encoder);
317 if (ret) {
318 DRM_ERROR("failed to create connector ret = %d\n", ret);
319 drm_encoder_cleanup(encoder);
320 return ret;
321 }
322
323 return 0;
324}
325
326struct drm_encoder *exynos_dpi_probe(struct device *dev)
303{ 327{
304 struct exynos_dpi *ctx; 328 struct exynos_dpi *ctx;
305 int ret; 329 int ret;
@@ -308,10 +332,7 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
308 if (!ctx) 332 if (!ctx)
309 return ERR_PTR(-ENOMEM); 333 return ERR_PTR(-ENOMEM);
310 334
311 ctx->display.type = EXYNOS_DISPLAY_TYPE_LCD;
312 ctx->display.ops = &exynos_dpi_display_ops;
313 ctx->dev = dev; 335 ctx->dev = dev;
314 ctx->dpms_mode = DRM_MODE_DPMS_OFF;
315 336
316 ret = exynos_dpi_parse_dt(ctx); 337 ret = exynos_dpi_parse_dt(ctx);
317 if (ret < 0) { 338 if (ret < 0) {
@@ -325,14 +346,14 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
325 return ERR_PTR(-EPROBE_DEFER); 346 return ERR_PTR(-EPROBE_DEFER);
326 } 347 }
327 348
328 return &ctx->display; 349 return &ctx->encoder;
329} 350}
330 351
331int exynos_dpi_remove(struct exynos_drm_display *display) 352int exynos_dpi_remove(struct drm_encoder *encoder)
332{ 353{
333 struct exynos_dpi *ctx = display_to_dpi(display); 354 struct exynos_dpi *ctx = encoder_to_dpi(encoder);
334 355
335 exynos_dpi_dpms(&ctx->display, DRM_MODE_DPMS_OFF); 356 exynos_dpi_disable(&ctx->encoder);
336 357
337 if (ctx->panel) 358 if (ctx->panel)
338 drm_panel_detach(ctx->panel); 359 drm_panel_detach(ctx->panel);
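
The same bind skeleton recurs in the dp, dpi and dsi hunks of this patch;
condensed here for reference (illustrative, with funcs/helper_funcs standing
in for each driver's tables and error handling as in the real hunks):

	pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
						  EXYNOS_DISPLAY_TYPE_LCD);
	if (pipe < 0)
		return pipe;

	encoder->possible_crtcs = 1 << pipe;	/* bind to that one crtc */

	drm_encoder_init(drm_dev, encoder, &funcs, DRM_MODE_ENCODER_TMDS);
	drm_encoder_helper_add(encoder, &helper_funcs);
	/* each driver then creates its connector, cleaning up on failure */
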
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 63a68c60a353..fa5194caf259 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -21,13 +21,11 @@
21 21
22#include "exynos_drm_drv.h" 22#include "exynos_drm_drv.h"
23#include "exynos_drm_crtc.h" 23#include "exynos_drm_crtc.h"
24#include "exynos_drm_encoder.h"
25#include "exynos_drm_fbdev.h" 24#include "exynos_drm_fbdev.h"
26#include "exynos_drm_fb.h" 25#include "exynos_drm_fb.h"
27#include "exynos_drm_gem.h" 26#include "exynos_drm_gem.h"
28#include "exynos_drm_plane.h" 27#include "exynos_drm_plane.h"
29#include "exynos_drm_vidi.h" 28#include "exynos_drm_vidi.h"
30#include "exynos_drm_dmabuf.h"
31#include "exynos_drm_g2d.h" 29#include "exynos_drm_g2d.h"
32#include "exynos_drm_ipp.h" 30#include "exynos_drm_ipp.h"
33#include "exynos_drm_iommu.h" 31#include "exynos_drm_iommu.h"
@@ -41,7 +39,9 @@
41static int exynos_drm_load(struct drm_device *dev, unsigned long flags) 39static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
42{ 40{
43 struct exynos_drm_private *private; 41 struct exynos_drm_private *private;
44 int ret; 42 struct drm_encoder *encoder;
43 unsigned int clone_mask;
44 int cnt, ret;
45 45
46 private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL); 46 private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
47 if (!private) 47 if (!private)
@@ -67,7 +67,13 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
67 exynos_drm_mode_config_init(dev); 67 exynos_drm_mode_config_init(dev);
68 68
69 /* setup possible_clones. */ 69 /* setup possible_clones. */
70 exynos_drm_encoder_setup(dev); 70 cnt = 0;
71 clone_mask = 0;
72 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
73 clone_mask |= (1 << (cnt++));
74
75 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
76 encoder->possible_clones = clone_mask;
71 77
72 platform_set_drvdata(dev->platformdev, dev); 78 platform_set_drvdata(dev->platformdev, dev);
73 79
@@ -297,8 +303,12 @@ static struct drm_driver exynos_drm_driver = {
297 .dumb_destroy = drm_gem_dumb_destroy, 303 .dumb_destroy = drm_gem_dumb_destroy,
298 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 304 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
299 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 305 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
300 .gem_prime_export = exynos_dmabuf_prime_export, 306 .gem_prime_export = drm_gem_prime_export,
301 .gem_prime_import = exynos_dmabuf_prime_import, 307 .gem_prime_import = drm_gem_prime_import,
308 .gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
309 .gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
310 .gem_prime_vmap = exynos_drm_gem_prime_vmap,
311 .gem_prime_vunmap = exynos_drm_gem_prime_vunmap,
302 .ioctls = exynos_ioctls, 312 .ioctls = exynos_ioctls,
303 .num_ioctls = ARRAY_SIZE(exynos_ioctls), 313 .num_ioctls = ARRAY_SIZE(exynos_ioctls),
304 .fops = &exynos_drm_driver_fops, 314 .fops = &exynos_drm_driver_fops,
@@ -345,9 +355,6 @@ static struct platform_driver exynos_drm_platform_driver;
345 * because the connector requires the pipe number of its crtc during initialization. 355 * because the connector requires the pipe number of its crtc during initialization.
346 */ 356 */
347static struct platform_driver *const exynos_drm_kms_drivers[] = { 357static struct platform_driver *const exynos_drm_kms_drivers[] = {
348#ifdef CONFIG_DRM_EXYNOS_VIDI
349 &vidi_driver,
350#endif
351#ifdef CONFIG_DRM_EXYNOS_FIMD 358#ifdef CONFIG_DRM_EXYNOS_FIMD
352 &fimd_driver, 359 &fimd_driver,
353#endif 360#endif
@@ -370,6 +377,9 @@ static struct platform_driver *const exynos_drm_kms_drivers[] = {
370 &mixer_driver, 377 &mixer_driver,
371 &hdmi_driver, 378 &hdmi_driver,
372#endif 379#endif
380#ifdef CONFIG_DRM_EXYNOS_VIDI
381 &vidi_driver,
382#endif
373}; 383};
374 384
375static struct platform_driver *const exynos_drm_non_kms_drivers[] = { 385static struct platform_driver *const exynos_drm_non_kms_drivers[] = {
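
The open-coded clone_mask loop added above marks every encoder as cloneable
with every other one; a hypothetical, more compact equivalent:

	unsigned int cnt = 0, clone_mask;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		cnt++;
	clone_mask = (1U << cnt) - 1;	/* all-ones mask over cnt encoders */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		encoder->possible_clones = clone_mask;
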
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index dd00f160c1e5..6b8a30f23473 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -44,23 +44,14 @@ enum exynos_drm_output_type {
44 * - the unit is screen coordinates. 44 * - the unit is screen coordinates.
45 * @src_y: offset y on a framebuffer to be displayed. 45 * @src_y: offset y on a framebuffer to be displayed.
46 * - the unit is screen coordinates. 46 * - the unit is screen coordinates.
47 * @src_width: width of a partial image to be displayed from framebuffer. 47 * @src_w: width of a partial image to be displayed from framebuffer.
48 * @src_height: height of a partial image to be displayed from framebuffer. 48 * @src_h: height of a partial image to be displayed from framebuffer.
49 * @fb_width: width of a framebuffer.
50 * @fb_height: height of a framebuffer.
51 * @crtc_x: offset x on hardware screen. 49 * @crtc_x: offset x on hardware screen.
52 * @crtc_y: offset y on hardware screen. 50 * @crtc_y: offset y on hardware screen.
53 * @crtc_width: window width to be displayed (hardware screen). 51 * @crtc_w: window width to be displayed (hardware screen).
54 * @crtc_height: window height to be displayed (hardware screen). 52 * @crtc_h: window height to be displayed (hardware screen).
55 * @mode_width: width of screen mode.
56 * @mode_height: height of screen mode.
57 * @h_ratio: horizontal scaling ratio, 16.16 fixed point 53 * @h_ratio: horizontal scaling ratio, 16.16 fixed point
58 * @v_ratio: vertical scaling ratio, 16.16 fixed point 54 * @v_ratio: vertical scaling ratio, 16.16 fixed point
59 * @refresh: refresh rate.
60 * @scan_flag: interlace or progressive way.
61 * (it could be DRM_MODE_FLAG_*)
62 * @bpp: pixel size.(in bit)
63 * @pixel_format: fourcc pixel format of this overlay
64 * @dma_addr: array of bus (DMA) addresses of the memory region 55 * @dma_addr: array of bus (DMA) addresses of the memory region
65 * allocated for an overlay. 56 * allocated for an overlay.
66 * @zpos: order of the overlay layer (z position). 57 * @zpos: order of the overlay layer (z position).
@@ -73,76 +64,19 @@ struct exynos_drm_plane {
73 struct drm_plane base; 64 struct drm_plane base;
74 unsigned int src_x; 65 unsigned int src_x;
75 unsigned int src_y; 66 unsigned int src_y;
76 unsigned int src_width; 67 unsigned int src_w;
77 unsigned int src_height; 68 unsigned int src_h;
78 unsigned int fb_width;
79 unsigned int fb_height;
80 unsigned int crtc_x; 69 unsigned int crtc_x;
81 unsigned int crtc_y; 70 unsigned int crtc_y;
82 unsigned int crtc_width; 71 unsigned int crtc_w;
83 unsigned int crtc_height; 72 unsigned int crtc_h;
84 unsigned int mode_width;
85 unsigned int mode_height;
86 unsigned int h_ratio; 73 unsigned int h_ratio;
87 unsigned int v_ratio; 74 unsigned int v_ratio;
88 unsigned int refresh;
89 unsigned int scan_flag;
90 unsigned int bpp;
91 unsigned int pitch;
92 uint32_t pixel_format;
93 dma_addr_t dma_addr[MAX_FB_BUFFER]; 75 dma_addr_t dma_addr[MAX_FB_BUFFER];
94 unsigned int zpos; 76 unsigned int zpos;
95}; 77};
96 78
97/* 79/*
98 * Exynos DRM Display Structure.
99 * - this structure is common to analog tv, digital tv and lcd panel.
100 *
101 * @create_connector: initialize and register a new connector
102 * @remove: cleans up the display for removal
103 * @mode_fixup: fix mode data comparing to hw specific display mode.
104 * @mode_set: convert drm_display_mode to hw specific display mode and
105 * would be called by encoder->mode_set().
106 * @check_mode: check if mode is valid or not.
107 * @dpms: display device on or off.
108 * @commit: apply changes to hw
109 */
110struct exynos_drm_display;
111struct exynos_drm_display_ops {
112 int (*create_connector)(struct exynos_drm_display *display,
113 struct drm_encoder *encoder);
114 void (*remove)(struct exynos_drm_display *display);
115 void (*mode_fixup)(struct exynos_drm_display *display,
116 struct drm_connector *connector,
117 const struct drm_display_mode *mode,
118 struct drm_display_mode *adjusted_mode);
119 void (*mode_set)(struct exynos_drm_display *display,
120 struct drm_display_mode *mode);
121 int (*check_mode)(struct exynos_drm_display *display,
122 struct drm_display_mode *mode);
123 void (*dpms)(struct exynos_drm_display *display, int mode);
124 void (*commit)(struct exynos_drm_display *display);
125};
126
127/*
128 * Exynos drm display structure, maps 1:1 with an encoder/connector
129 *
130 * @list: the list entry for this manager
131 * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
132 * @encoder: encoder object this display maps to
133 * @connector: connector object this display maps to
134 * @ops: pointer to callbacks for exynos drm specific functionality
135 * @ctx: A pointer to the display's implementation specific context
136 */
137struct exynos_drm_display {
138 struct list_head list;
139 enum exynos_drm_output_type type;
140 struct drm_encoder *encoder;
141 struct drm_connector *connector;
142 struct exynos_drm_display_ops *ops;
143};
144
145/*
146 * Exynos drm crtc ops 80 * Exynos drm crtc ops
147 * 81 *
148 * @enable: enable the device 82 * @enable: enable the device
@@ -153,8 +87,8 @@ struct exynos_drm_display {
153 * @disable_vblank: specific driver callback for disabling vblank interrupt. 87 * @disable_vblank: specific driver callback for disabling vblank interrupt.
154 * @wait_for_vblank: wait for vblank interrupt to make sure that 88 * @wait_for_vblank: wait for vblank interrupt to make sure that
155 * hardware overlay is updated. 89 * hardware overlay is updated.
156 * @win_commit: apply hardware specific overlay data to registers. 90 * @update_plane: apply hardware specific overlay data to registers.
157 * @win_disable: disable hardware specific overlay. 91 * @disable_plane: disable hardware specific overlay.
158 * @te_handler: trigger to transfer video image at the tearing effect 92 * @te_handler: trigger to transfer video image at the tearing effect
159 * synchronization signal if there is a page flip request. 93 * synchronization signal if there is a page flip request.
160 * @clock_enable: optional function enabling/disabling display domain clock, 94 * @clock_enable: optional function enabling/disabling display domain clock,
@@ -173,11 +107,12 @@ struct exynos_drm_crtc_ops {
173 int (*enable_vblank)(struct exynos_drm_crtc *crtc); 107 int (*enable_vblank)(struct exynos_drm_crtc *crtc);
174 void (*disable_vblank)(struct exynos_drm_crtc *crtc); 108 void (*disable_vblank)(struct exynos_drm_crtc *crtc);
175 void (*wait_for_vblank)(struct exynos_drm_crtc *crtc); 109 void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
176 void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos); 110 void (*update_plane)(struct exynos_drm_crtc *crtc,
177 void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos); 111 struct exynos_drm_plane *plane);
112 void (*disable_plane)(struct exynos_drm_crtc *crtc,
113 struct exynos_drm_plane *plane);
178 void (*te_handler)(struct exynos_drm_crtc *crtc); 114 void (*te_handler)(struct exynos_drm_crtc *crtc);
179 void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable); 115 void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
180 void (*clear_channels)(struct exynos_drm_crtc *crtc);
181}; 116};
182 117
183/* 118/*
@@ -285,20 +220,23 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
285void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file); 220void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
286 221
287#ifdef CONFIG_DRM_EXYNOS_DPI 222#ifdef CONFIG_DRM_EXYNOS_DPI
288struct exynos_drm_display * exynos_dpi_probe(struct device *dev); 223struct drm_encoder *exynos_dpi_probe(struct device *dev);
289int exynos_dpi_remove(struct exynos_drm_display *display); 224int exynos_dpi_remove(struct drm_encoder *encoder);
225int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder);
290#else 226#else
291static inline struct exynos_drm_display * 227static inline struct drm_encoder *
292exynos_dpi_probe(struct device *dev) { return NULL; } 228exynos_dpi_probe(struct device *dev) { return NULL; }
293static inline int exynos_dpi_remove(struct exynos_drm_display *display) 229static inline int exynos_dpi_remove(struct drm_encoder *encoder)
230{
231 return 0;
232}
233static inline int exynos_dpi_bind(struct drm_device *dev,
234 struct drm_encoder *encoder)
294{ 235{
295 return 0; 236 return 0;
296} 237}
297#endif 238#endif
298 239
299/* This function creates a encoder and a connector, and initializes them. */
300int exynos_drm_create_enc_conn(struct drm_device *dev,
301 struct exynos_drm_display *display);
302 240
303extern struct platform_driver fimd_driver; 241extern struct platform_driver fimd_driver;
304extern struct platform_driver exynos5433_decon_driver; 242extern struct platform_driver exynos5433_decon_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 0e58b36cb8c2..12b03b364703 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -259,7 +259,7 @@ struct exynos_dsi_driver_data {
259}; 259};
260 260
261struct exynos_dsi { 261struct exynos_dsi {
262 struct exynos_drm_display display; 262 struct drm_encoder encoder;
263 struct mipi_dsi_host dsi_host; 263 struct mipi_dsi_host dsi_host;
264 struct drm_connector connector; 264 struct drm_connector connector;
265 struct device_node *panel_node; 265 struct device_node *panel_node;
@@ -295,9 +295,9 @@ struct exynos_dsi {
295#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host) 295#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
296#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector) 296#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
297 297
298static inline struct exynos_dsi *display_to_dsi(struct exynos_drm_display *d) 298static inline struct exynos_dsi *encoder_to_dsi(struct drm_encoder *e)
299{ 299{
300 return container_of(d, struct exynos_dsi, display); 300 return container_of(e, struct exynos_dsi, encoder);
301} 301}
302 302
303enum reg_idx { 303enum reg_idx {
@@ -1272,7 +1272,7 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
1272static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id) 1272static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
1273{ 1273{
1274 struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id; 1274 struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
1275 struct drm_encoder *encoder = dsi->display.encoder; 1275 struct drm_encoder *encoder = &dsi->encoder;
1276 1276
1277 if (dsi->state & DSIM_STATE_VIDOUT_AVAILABLE) 1277 if (dsi->state & DSIM_STATE_VIDOUT_AVAILABLE)
1278 exynos_drm_crtc_te_handler(encoder->crtc); 1278 exynos_drm_crtc_te_handler(encoder->crtc);
@@ -1518,16 +1518,17 @@ static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
1518 dev_err(dsi->dev, "cannot disable regulators %d\n", ret); 1518 dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
1519} 1519}
1520 1520
1521static int exynos_dsi_enable(struct exynos_dsi *dsi) 1521static void exynos_dsi_enable(struct drm_encoder *encoder)
1522{ 1522{
1523 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
1523 int ret; 1524 int ret;
1524 1525
1525 if (dsi->state & DSIM_STATE_ENABLED) 1526 if (dsi->state & DSIM_STATE_ENABLED)
1526 return 0; 1527 return;
1527 1528
1528 ret = exynos_dsi_poweron(dsi); 1529 ret = exynos_dsi_poweron(dsi);
1529 if (ret < 0) 1530 if (ret < 0)
1530 return ret; 1531 return;
1531 1532
1532 dsi->state |= DSIM_STATE_ENABLED; 1533 dsi->state |= DSIM_STATE_ENABLED;
1533 1534
@@ -1535,7 +1536,7 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
1535 if (ret < 0) { 1536 if (ret < 0) {
1536 dsi->state &= ~DSIM_STATE_ENABLED; 1537 dsi->state &= ~DSIM_STATE_ENABLED;
1537 exynos_dsi_poweroff(dsi); 1538 exynos_dsi_poweroff(dsi);
1538 return ret; 1539 return;
1539 } 1540 }
1540 1541
1541 exynos_dsi_set_display_mode(dsi); 1542 exynos_dsi_set_display_mode(dsi);
@@ -1547,16 +1548,16 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
1547 exynos_dsi_set_display_enable(dsi, false); 1548 exynos_dsi_set_display_enable(dsi, false);
1548 drm_panel_unprepare(dsi->panel); 1549 drm_panel_unprepare(dsi->panel);
1549 exynos_dsi_poweroff(dsi); 1550 exynos_dsi_poweroff(dsi);
1550 return ret; 1551 return;
1551 } 1552 }
1552 1553
1553 dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE; 1554 dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
1554
1555 return 0;
1556} 1555}
1557 1556
1558static void exynos_dsi_disable(struct exynos_dsi *dsi) 1557static void exynos_dsi_disable(struct drm_encoder *encoder)
1559{ 1558{
1559 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
1560
1560 if (!(dsi->state & DSIM_STATE_ENABLED)) 1561 if (!(dsi->state & DSIM_STATE_ENABLED))
1561 return; 1562 return;
1562 1563
@@ -1571,26 +1572,6 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi)
1571 exynos_dsi_poweroff(dsi); 1572 exynos_dsi_poweroff(dsi);
1572} 1573}
1573 1574
1574static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode)
1575{
1576 struct exynos_dsi *dsi = display_to_dsi(display);
1577
1578 if (dsi->panel) {
1579 switch (mode) {
1580 case DRM_MODE_DPMS_ON:
1581 exynos_dsi_enable(dsi);
1582 break;
1583 case DRM_MODE_DPMS_STANDBY:
1584 case DRM_MODE_DPMS_SUSPEND:
1585 case DRM_MODE_DPMS_OFF:
1586 exynos_dsi_disable(dsi);
1587 break;
1588 default:
1589 break;
1590 }
1591 }
1592}
1593
1594static enum drm_connector_status 1575static enum drm_connector_status
1595exynos_dsi_detect(struct drm_connector *connector, bool force) 1576exynos_dsi_detect(struct drm_connector *connector, bool force)
1596{ 1577{
@@ -1601,10 +1582,10 @@ exynos_dsi_detect(struct drm_connector *connector, bool force)
1601 if (dsi->panel) 1582 if (dsi->panel)
1602 drm_panel_attach(dsi->panel, &dsi->connector); 1583 drm_panel_attach(dsi->panel, &dsi->connector);
1603 } else if (!dsi->panel_node) { 1584 } else if (!dsi->panel_node) {
1604 struct exynos_drm_display *display; 1585 struct drm_encoder *encoder;
1605 1586
1606 display = platform_get_drvdata(to_platform_device(dsi->dev)); 1587 encoder = platform_get_drvdata(to_platform_device(dsi->dev));
1607 exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF); 1588 exynos_dsi_disable(encoder);
1608 drm_panel_detach(dsi->panel); 1589 drm_panel_detach(dsi->panel);
1609 dsi->panel = NULL; 1590 dsi->panel = NULL;
1610 } 1591 }
@@ -1647,7 +1628,7 @@ exynos_dsi_best_encoder(struct drm_connector *connector)
1647{ 1628{
1648 struct exynos_dsi *dsi = connector_to_dsi(connector); 1629 struct exynos_dsi *dsi = connector_to_dsi(connector);
1649 1630
1650 return dsi->display.encoder; 1631 return &dsi->encoder;
1651} 1632}
1652 1633
1653static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = { 1634static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
@@ -1655,10 +1636,9 @@ static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
1655 .best_encoder = exynos_dsi_best_encoder, 1636 .best_encoder = exynos_dsi_best_encoder,
1656}; 1637};
1657 1638
1658static int exynos_dsi_create_connector(struct exynos_drm_display *display, 1639static int exynos_dsi_create_connector(struct drm_encoder *encoder)
1659 struct drm_encoder *encoder)
1660{ 1640{
1661 struct exynos_dsi *dsi = display_to_dsi(display); 1641 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
1662 struct drm_connector *connector = &dsi->connector; 1642 struct drm_connector *connector = &dsi->connector;
1663 int ret; 1643 int ret;
1664 1644
@@ -1679,26 +1659,40 @@ static int exynos_dsi_create_connector(struct exynos_drm_display *display,
1679 return 0; 1659 return 0;
1680} 1660}
1681 1661
1682static void exynos_dsi_mode_set(struct exynos_drm_display *display, 1662static bool exynos_dsi_mode_fixup(struct drm_encoder *encoder,
1683 struct drm_display_mode *mode) 1663 const struct drm_display_mode *mode,
1664 struct drm_display_mode *adjusted_mode)
1684{ 1665{
1685 struct exynos_dsi *dsi = display_to_dsi(display); 1666 return true;
1686 struct videomode *vm = &dsi->vm; 1667}
1687 1668
1688 vm->hactive = mode->hdisplay; 1669static void exynos_dsi_mode_set(struct drm_encoder *encoder,
1689 vm->vactive = mode->vdisplay; 1670 struct drm_display_mode *mode,
1690 vm->vfront_porch = mode->vsync_start - mode->vdisplay; 1671 struct drm_display_mode *adjusted_mode)
1691 vm->vback_porch = mode->vtotal - mode->vsync_end; 1672{
1692 vm->vsync_len = mode->vsync_end - mode->vsync_start; 1673 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
1693 vm->hfront_porch = mode->hsync_start - mode->hdisplay; 1674 struct videomode *vm = &dsi->vm;
1694 vm->hback_porch = mode->htotal - mode->hsync_end; 1675 struct drm_display_mode *m = adjusted_mode;
1695 vm->hsync_len = mode->hsync_end - mode->hsync_start; 1676
1677 vm->hactive = m->hdisplay;
1678 vm->vactive = m->vdisplay;
1679 vm->vfront_porch = m->vsync_start - m->vdisplay;
1680 vm->vback_porch = m->vtotal - m->vsync_end;
1681 vm->vsync_len = m->vsync_end - m->vsync_start;
1682 vm->hfront_porch = m->hsync_start - m->hdisplay;
1683 vm->hback_porch = m->htotal - m->hsync_end;
1684 vm->hsync_len = m->hsync_end - m->hsync_start;
1696} 1685}
1697 1686
1698static struct exynos_drm_display_ops exynos_dsi_display_ops = { 1687static struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
1699 .create_connector = exynos_dsi_create_connector, 1688 .mode_fixup = exynos_dsi_mode_fixup,
1700 .mode_set = exynos_dsi_mode_set, 1689 .mode_set = exynos_dsi_mode_set,
1701 .dpms = exynos_dsi_dpms 1690 .enable = exynos_dsi_enable,
1691 .disable = exynos_dsi_disable,
1692};
1693
1694static struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
1695 .destroy = drm_encoder_cleanup,
1702}; 1696};
1703 1697
1704MODULE_DEVICE_TABLE(of, exynos_dsi_of_match); 1698MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
@@ -1821,22 +1815,35 @@ end:
1821static int exynos_dsi_bind(struct device *dev, struct device *master, 1815static int exynos_dsi_bind(struct device *dev, struct device *master,
1822 void *data) 1816 void *data)
1823{ 1817{
1824 struct exynos_drm_display *display = dev_get_drvdata(dev); 1818 struct drm_encoder *encoder = dev_get_drvdata(dev);
1825 struct exynos_dsi *dsi = display_to_dsi(display); 1819 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
1826 struct drm_device *drm_dev = data; 1820 struct drm_device *drm_dev = data;
1827 struct drm_bridge *bridge; 1821 struct drm_bridge *bridge;
1828 int ret; 1822 int ret;
1829 1823
1830 ret = exynos_drm_create_enc_conn(drm_dev, display); 1824 ret = exynos_drm_crtc_get_pipe_from_type(drm_dev,
1825 EXYNOS_DISPLAY_TYPE_LCD);
1826 if (ret < 0)
1827 return ret;
1828
1829 encoder->possible_crtcs = 1 << ret;
1830
1831 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
1832
1833 drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
1834 DRM_MODE_ENCODER_TMDS);
1835
1836 drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
1837
1838 ret = exynos_dsi_create_connector(encoder);
1831 if (ret) { 1839 if (ret) {
1832 DRM_ERROR("Encoder create [%d] failed with %d\n", 1840 DRM_ERROR("failed to create connector ret = %d\n", ret);
1833 display->type, ret); 1841 drm_encoder_cleanup(encoder);
1834 return ret; 1842 return ret;
1835 } 1843 }
1836 1844
1837 bridge = of_drm_find_bridge(dsi->bridge_node); 1845 bridge = of_drm_find_bridge(dsi->bridge_node);
1838 if (bridge) { 1846 if (bridge) {
1839 display->encoder->bridge = bridge;
1840 drm_bridge_attach(drm_dev, bridge); 1847 drm_bridge_attach(drm_dev, bridge);
1841 } 1848 }
1842 1849
@@ -1846,10 +1853,10 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
1846static void exynos_dsi_unbind(struct device *dev, struct device *master, 1853static void exynos_dsi_unbind(struct device *dev, struct device *master,
1847 void *data) 1854 void *data)
1848{ 1855{
1849 struct exynos_drm_display *display = dev_get_drvdata(dev); 1856 struct drm_encoder *encoder = dev_get_drvdata(dev);
1850 struct exynos_dsi *dsi = display_to_dsi(display); 1857 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
1851 1858
1852 exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF); 1859 exynos_dsi_disable(encoder);
1853 1860
1854 mipi_dsi_host_unregister(&dsi->dsi_host); 1861 mipi_dsi_host_unregister(&dsi->dsi_host);
1855} 1862}
@@ -1870,9 +1877,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1870 if (!dsi) 1877 if (!dsi)
1871 return -ENOMEM; 1878 return -ENOMEM;
1872 1879
1873 dsi->display.type = EXYNOS_DISPLAY_TYPE_LCD;
1874 dsi->display.ops = &exynos_dsi_display_ops;
1875
1876 /* initialized to an invalid value, checked before use */ 1880 /* initialized to an invalid value, checked before use */
1877 dsi->te_gpio = -ENOENT; 1881 dsi->te_gpio = -ENOENT;
1878 1882
@@ -1948,7 +1952,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1948 return ret; 1952 return ret;
1949 } 1953 }
1950 1954
1951 platform_set_drvdata(pdev, &dsi->display); 1955 platform_set_drvdata(pdev, &dsi->encoder);
1952 1956
1953 return component_add(dev, &exynos_dsi_component_ops); 1957 return component_add(dev, &exynos_dsi_component_ops);
1954} 1958}
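
The exynos_dsi_mode_set() hunk above is the standard drm_display_mode-to-videomode timing conversion: each porch is the gap between the active region, sync start and sync end. A minimal standalone sketch of the same arithmetic (struct layouts assumed from <drm/drm_modes.h> and <video/videomode.h>; drm core also offers drm_display_mode_to_videomode() for this):

static void sketch_mode_to_vm(const struct drm_display_mode *m,
			      struct videomode *vm)
{
	vm->hactive      = m->hdisplay;
	vm->hfront_porch = m->hsync_start - m->hdisplay;  /* active -> sync */
	vm->hsync_len    = m->hsync_end - m->hsync_start; /* sync pulse */
	vm->hback_porch  = m->htotal - m->hsync_end;      /* sync -> next line */
	vm->vactive      = m->vdisplay;
	vm->vfront_porch = m->vsync_start - m->vdisplay;
	vm->vsync_len    = m->vsync_end - m->vsync_start;
	vm->vback_porch  = m->vtotal - m->vsync_end;
}
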
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
deleted file mode 100644
index 7b89fd520e45..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ /dev/null
@@ -1,174 +0,0 @@
1/* exynos_drm_encoder.c
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Authors:
5 * Inki Dae <inki.dae@samsung.com>
6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14
15#include <drm/drmP.h>
16#include <drm/drm_crtc_helper.h>
17
18#include "exynos_drm_drv.h"
19#include "exynos_drm_encoder.h"
20
21#define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\
22 drm_encoder)
23
24/*
25 * exynos specific encoder structure.
26 *
27 * @drm_encoder: encoder object.
28 * @display: the display structure that maps to this encoder
29 */
30struct exynos_drm_encoder {
31 struct drm_encoder drm_encoder;
32 struct exynos_drm_display *display;
33};
34
35static bool
36exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
37 const struct drm_display_mode *mode,
38 struct drm_display_mode *adjusted_mode)
39{
40 struct drm_device *dev = encoder->dev;
41 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
42 struct exynos_drm_display *display = exynos_encoder->display;
43 struct drm_connector *connector;
44
45 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
46 if (connector->encoder != encoder)
47 continue;
48
49 if (display->ops->mode_fixup)
50 display->ops->mode_fixup(display, connector, mode,
51 adjusted_mode);
52 }
53
54 return true;
55}
56
57static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
58 struct drm_display_mode *mode,
59 struct drm_display_mode *adjusted_mode)
60{
61 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
62 struct exynos_drm_display *display = exynos_encoder->display;
63
64 if (display->ops->mode_set)
65 display->ops->mode_set(display, adjusted_mode);
66}
67
68static void exynos_drm_encoder_enable(struct drm_encoder *encoder)
69{
70 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
71 struct exynos_drm_display *display = exynos_encoder->display;
72
73 if (display->ops->dpms)
74 display->ops->dpms(display, DRM_MODE_DPMS_ON);
75
76 if (display->ops->commit)
77 display->ops->commit(display);
78}
79
80static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
81{
82 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
83 struct exynos_drm_display *display = exynos_encoder->display;
84
85 if (display->ops->dpms)
86 display->ops->dpms(display, DRM_MODE_DPMS_OFF);
87}
88
89static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
90 .mode_fixup = exynos_drm_encoder_mode_fixup,
91 .mode_set = exynos_drm_encoder_mode_set,
92 .enable = exynos_drm_encoder_enable,
93 .disable = exynos_drm_encoder_disable,
94};
95
96static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
97{
98 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
99
100 drm_encoder_cleanup(encoder);
101 kfree(exynos_encoder);
102}
103
104static struct drm_encoder_funcs exynos_encoder_funcs = {
105 .destroy = exynos_drm_encoder_destroy,
106};
107
108static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder)
109{
110 struct drm_encoder *clone;
111 struct drm_device *dev = encoder->dev;
112 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
113 struct exynos_drm_display *display = exynos_encoder->display;
114 unsigned int clone_mask = 0;
115 int cnt = 0;
116
117 list_for_each_entry(clone, &dev->mode_config.encoder_list, head) {
118 switch (display->type) {
119 case EXYNOS_DISPLAY_TYPE_LCD:
120 case EXYNOS_DISPLAY_TYPE_HDMI:
121 case EXYNOS_DISPLAY_TYPE_VIDI:
122 clone_mask |= (1 << (cnt++));
123 break;
124 default:
125 continue;
126 }
127 }
128
129 return clone_mask;
130}
131
132void exynos_drm_encoder_setup(struct drm_device *dev)
133{
134 struct drm_encoder *encoder;
135
136 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
137 encoder->possible_clones = exynos_drm_encoder_clones(encoder);
138}
139
140struct drm_encoder *
141exynos_drm_encoder_create(struct drm_device *dev,
142 struct exynos_drm_display *display,
143 unsigned long possible_crtcs)
144{
145 struct drm_encoder *encoder;
146 struct exynos_drm_encoder *exynos_encoder;
147
148 if (!possible_crtcs)
149 return NULL;
150
151 exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
152 if (!exynos_encoder)
153 return NULL;
154
155 exynos_encoder->display = display;
156 encoder = &exynos_encoder->drm_encoder;
157 encoder->possible_crtcs = possible_crtcs;
158
159 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
160
161 drm_encoder_init(dev, encoder, &exynos_encoder_funcs,
162 DRM_MODE_ENCODER_TMDS);
163
164 drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs);
165
166 DRM_DEBUG_KMS("encoder has been created\n");
167
168 return encoder;
169}
170
171struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder)
172{
173 return to_exynos_encoder(encoder)->display;
174}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
deleted file mode 100644
index 26305d8dd93a..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
3 * Authors:
4 * Inki Dae <inki.dae@samsung.com>
5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#ifndef _EXYNOS_DRM_ENCODER_H_
15#define _EXYNOS_DRM_ENCODER_H_
16
17void exynos_drm_encoder_setup(struct drm_device *dev);
18struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
19 struct exynos_drm_display *mgr,
20 unsigned long possible_crtcs);
21struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder);
22
23#endif
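
With the exynos_drm_encoder midlayer deleted, each output driver embeds struct drm_encoder directly and upcasts with container_of(), as encoder_to_dsi() does in the DSI hunks above. A minimal sketch of the pattern (struct and helper names here are illustrative, not from the tree):

/* The encoder lives inside the driver object, so there is no separate
 * wrapper allocation and no ->display pointer indirection to chase. */
struct sketch_output {
	struct drm_encoder encoder;	/* initialized with drm_encoder_init() */
	struct videomode vm;		/* driver-private state beside it */
};

static inline struct sketch_output *to_sketch_output(struct drm_encoder *e)
{
	return container_of(e, struct sketch_output, encoder);
}
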
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 2b6320e6eae2..9738f4e0c6eb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -238,22 +238,22 @@ err_free:
238 return ERR_PTR(ret); 238 return ERR_PTR(ret);
239} 239}
240 240
241struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb, 241struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
242 int index) 242 int index)
243{ 243{
244 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 244 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
245 struct exynos_drm_gem_buf *buffer; 245 struct exynos_drm_gem_obj *obj;
246 246
247 if (index >= MAX_FB_BUFFER) 247 if (index >= MAX_FB_BUFFER)
248 return NULL; 248 return NULL;
249 249
250 buffer = exynos_fb->exynos_gem_obj[index]->buffer; 250 obj = exynos_fb->exynos_gem_obj[index];
251 if (!buffer) 251 if (!obj)
252 return NULL; 252 return NULL;
253 253
254 DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr); 254 DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)obj->dma_addr);
255 255
256 return buffer; 256 return obj;
257} 257}
258 258
259static void exynos_drm_output_poll_changed(struct drm_device *dev) 259static void exynos_drm_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 517471b37566..1c9e27c32cd1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -19,8 +19,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
19 struct drm_mode_fb_cmd2 *mode_cmd, 19 struct drm_mode_fb_cmd2 *mode_cmd,
20 struct drm_gem_object *obj); 20 struct drm_gem_object *obj);
21 21
22/* get memory information of a drm framebuffer */ 22/* get gem object of a drm framebuffer */
23struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb, 23struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
24 int index); 24 int index);
25 25
26void exynos_drm_mode_config_init(struct drm_device *dev); 26void exynos_drm_mode_config_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e0b085b4bdfa..624595afbce0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -40,8 +40,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
40{ 40{
41 struct drm_fb_helper *helper = info->par; 41 struct drm_fb_helper *helper = info->par;
42 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper); 42 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
43 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj; 43 struct exynos_drm_gem_obj *obj = exynos_fbd->exynos_gem_obj;
44 struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
45 unsigned long vm_size; 44 unsigned long vm_size;
46 int ret; 45 int ret;
47 46
@@ -49,11 +48,11 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
49 48
50 vm_size = vma->vm_end - vma->vm_start; 49 vm_size = vma->vm_end - vma->vm_start;
51 50
52 if (vm_size > buffer->size) 51 if (vm_size > obj->size)
53 return -EINVAL; 52 return -EINVAL;
54 53
55 ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages, 54 ret = dma_mmap_attrs(helper->dev->dev, vma, obj->pages, obj->dma_addr,
56 buffer->dma_addr, buffer->size, &buffer->dma_attrs); 55 obj->size, &obj->dma_attrs);
57 if (ret < 0) { 56 if (ret < 0) {
58 DRM_ERROR("failed to mmap.\n"); 57 DRM_ERROR("failed to mmap.\n");
59 return ret; 58 return ret;
@@ -65,9 +64,9 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
65static struct fb_ops exynos_drm_fb_ops = { 64static struct fb_ops exynos_drm_fb_ops = {
66 .owner = THIS_MODULE, 65 .owner = THIS_MODULE,
67 .fb_mmap = exynos_drm_fb_mmap, 66 .fb_mmap = exynos_drm_fb_mmap,
68 .fb_fillrect = cfb_fillrect, 67 .fb_fillrect = drm_fb_helper_cfb_fillrect,
69 .fb_copyarea = cfb_copyarea, 68 .fb_copyarea = drm_fb_helper_cfb_copyarea,
70 .fb_imageblit = cfb_imageblit, 69 .fb_imageblit = drm_fb_helper_cfb_imageblit,
71 .fb_check_var = drm_fb_helper_check_var, 70 .fb_check_var = drm_fb_helper_check_var,
72 .fb_set_par = drm_fb_helper_set_par, 71 .fb_set_par = drm_fb_helper_set_par,
73 .fb_blank = drm_fb_helper_blank, 72 .fb_blank = drm_fb_helper_blank,
@@ -80,7 +79,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
80 struct drm_framebuffer *fb) 79 struct drm_framebuffer *fb)
81{ 80{
82 struct fb_info *fbi = helper->fbdev; 81 struct fb_info *fbi = helper->fbdev;
83 struct exynos_drm_gem_buf *buffer; 82 struct exynos_drm_gem_obj *obj;
84 unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3); 83 unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
85 unsigned int nr_pages; 84 unsigned int nr_pages;
86 unsigned long offset; 85 unsigned long offset;
@@ -89,18 +88,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
89 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); 88 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
90 89
91 /* RGB formats use only one buffer */ 90 /* RGB formats use only one buffer */
92 buffer = exynos_drm_fb_buffer(fb, 0); 91 obj = exynos_drm_fb_gem_obj(fb, 0);
93 if (!buffer) { 92 if (!obj) {
94 DRM_DEBUG_KMS("buffer is null.\n"); 93 DRM_DEBUG_KMS("gem object is null.\n");
95 return -EFAULT; 94 return -EFAULT;
96 } 95 }
97 96
98 nr_pages = buffer->size >> PAGE_SHIFT; 97 nr_pages = obj->size >> PAGE_SHIFT;
99 98
100 buffer->kvaddr = (void __iomem *) vmap(buffer->pages, 99 obj->kvaddr = (void __iomem *) vmap(obj->pages, nr_pages, VM_MAP,
101 nr_pages, VM_MAP,
102 pgprot_writecombine(PAGE_KERNEL)); 100 pgprot_writecombine(PAGE_KERNEL));
103 if (!buffer->kvaddr) { 101 if (!obj->kvaddr) {
104 DRM_ERROR("failed to map pages to kernel space.\n"); 102 DRM_ERROR("failed to map pages to kernel space.\n");
105 return -EIO; 103 return -EIO;
106 } 104 }
@@ -111,7 +109,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
111 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); 109 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
112 offset += fbi->var.yoffset * fb->pitches[0]; 110 offset += fbi->var.yoffset * fb->pitches[0];
113 111
114 fbi->screen_base = buffer->kvaddr + offset; 112 fbi->screen_base = obj->kvaddr + offset;
115 fbi->screen_size = size; 113 fbi->screen_size = size;
116 fbi->fix.smem_len = size; 114 fbi->fix.smem_len = size;
117 115
@@ -142,10 +140,10 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
142 140
143 mutex_lock(&dev->struct_mutex); 141 mutex_lock(&dev->struct_mutex);
144 142
145 fbi = framebuffer_alloc(0, &pdev->dev); 143 fbi = drm_fb_helper_alloc_fbi(helper);
146 if (!fbi) { 144 if (IS_ERR(fbi)) {
147 DRM_ERROR("failed to allocate fb info.\n"); 145 DRM_ERROR("failed to allocate fb info.\n");
148 ret = -ENOMEM; 146 ret = PTR_ERR(fbi);
149 goto out; 147 goto out;
150 } 148 }
151 149
@@ -165,7 +163,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
165 163
166 if (IS_ERR(exynos_gem_obj)) { 164 if (IS_ERR(exynos_gem_obj)) {
167 ret = PTR_ERR(exynos_gem_obj); 165 ret = PTR_ERR(exynos_gem_obj);
168 goto err_release_framebuffer; 166 goto err_release_fbi;
169 } 167 }
170 168
171 exynos_fbdev->exynos_gem_obj = exynos_gem_obj; 169 exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -178,33 +176,23 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
178 goto err_destroy_gem; 176 goto err_destroy_gem;
179 } 177 }
180 178
181 helper->fbdev = fbi;
182
183 fbi->par = helper; 179 fbi->par = helper;
184 fbi->flags = FBINFO_FLAG_DEFAULT; 180 fbi->flags = FBINFO_FLAG_DEFAULT;
185 fbi->fbops = &exynos_drm_fb_ops; 181 fbi->fbops = &exynos_drm_fb_ops;
186 182
187 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
188 if (ret) {
189 DRM_ERROR("failed to allocate cmap.\n");
190 goto err_destroy_framebuffer;
191 }
192
193 ret = exynos_drm_fbdev_update(helper, sizes, helper->fb); 183 ret = exynos_drm_fbdev_update(helper, sizes, helper->fb);
194 if (ret < 0) 184 if (ret < 0)
195 goto err_dealloc_cmap; 185 goto err_destroy_framebuffer;
196 186
197 mutex_unlock(&dev->struct_mutex); 187 mutex_unlock(&dev->struct_mutex);
198 return ret; 188 return ret;
199 189
200err_dealloc_cmap:
201 fb_dealloc_cmap(&fbi->cmap);
202err_destroy_framebuffer: 190err_destroy_framebuffer:
203 drm_framebuffer_cleanup(helper->fb); 191 drm_framebuffer_cleanup(helper->fb);
204err_destroy_gem: 192err_destroy_gem:
205 exynos_drm_gem_destroy(exynos_gem_obj); 193 exynos_drm_gem_destroy(exynos_gem_obj);
206err_release_framebuffer: 194err_release_fbi:
207 framebuffer_release(fbi); 195 drm_fb_helper_release_fbi(helper);
208 196
209/* 197/*
210 * if this fails, all resources allocated above are released by 198 * if this fails, all resources allocated above are released by
@@ -300,8 +288,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
300 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj; 288 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
301 struct drm_framebuffer *fb; 289 struct drm_framebuffer *fb;
302 290
303 if (exynos_gem_obj->buffer->kvaddr) 291 if (exynos_gem_obj->kvaddr)
304 vunmap(exynos_gem_obj->buffer->kvaddr); 292 vunmap(exynos_gem_obj->kvaddr);
305 293
306 /* release drm framebuffer and real buffer */ 294 /* release drm framebuffer and real buffer */
307 if (fb_helper->fb && fb_helper->fb->funcs) { 295 if (fb_helper->fb && fb_helper->fb->funcs) {
@@ -312,21 +300,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
312 } 300 }
313 } 301 }
314 302
315 /* release linux framebuffer */ 303 drm_fb_helper_unregister_fbi(fb_helper);
316 if (fb_helper->fbdev) { 304 drm_fb_helper_release_fbi(fb_helper);
317 struct fb_info *info;
318 int ret;
319
320 info = fb_helper->fbdev;
321 ret = unregister_framebuffer(info);
322 if (ret < 0)
323 DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
324
325 if (info->cmap.len)
326 fb_dealloc_cmap(&info->cmap);
327
328 framebuffer_release(info);
329 }
330 305
331 drm_fb_helper_fini(fb_helper); 306 drm_fb_helper_fini(fb_helper);
332} 307}
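
The fbdev conversion above trades hand-rolled fb_info management for the drm_fb_helper-managed variant: drm_fb_helper_alloc_fbi() replaces framebuffer_alloc() plus fb_alloc_cmap() and registers the fb_info with the helper itself. A condensed sketch of the new pairing (error handling trimmed; assumes the 4.2-era helper API shown in the hunks):

static int sketch_fbdev_create(struct drm_fb_helper *helper)
{
	struct fb_info *fbi = drm_fb_helper_alloc_fbi(helper);

	if (IS_ERR(fbi))
		return PTR_ERR(fbi);	/* cmap allocation failure included */

	fbi->par = helper;
	fbi->fbops = &exynos_drm_fb_ops;
	return 0;
}

static void sketch_fbdev_destroy(struct drm_fb_helper *helper)
{
	/* mirrors the destroy hunk: these two calls replace
	 * unregister_framebuffer()/fb_dealloc_cmap()/framebuffer_release() */
	drm_fb_helper_unregister_fbi(helper);
	drm_fb_helper_release_fbi(helper);
}
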
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 842d6b8dc3c4..2a652359af64 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev)
1745 spin_lock_init(&ctx->lock); 1745 spin_lock_init(&ctx->lock);
1746 platform_set_drvdata(pdev, ctx); 1746 platform_set_drvdata(pdev, ctx);
1747 1747
1748 pm_runtime_set_active(dev);
1749 pm_runtime_enable(dev); 1748 pm_runtime_enable(dev);
1750 1749
1751 ret = exynos_drm_ippdrv_register(ippdrv); 1750 ret = exynos_drm_ippdrv_register(ippdrv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 794e56c8798e..5def6bc073eb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -169,7 +169,7 @@ struct fimd_context {
169 169
170 struct exynos_drm_panel_info panel; 170 struct exynos_drm_panel_info panel;
171 struct fimd_driver_data *driver_data; 171 struct fimd_driver_data *driver_data;
172 struct exynos_drm_display *display; 172 struct drm_encoder *encoder;
173}; 173};
174 174
175static const struct of_device_id fimd_driver_dt_match[] = { 175static const struct of_device_id fimd_driver_dt_match[] = {
@@ -348,13 +348,6 @@ static void fimd_clear_channels(struct exynos_drm_crtc *crtc)
348 pm_runtime_put(ctx->dev); 348 pm_runtime_put(ctx->dev);
349} 349}
350 350
351static void fimd_iommu_detach_devices(struct fimd_context *ctx)
352{
353 /* detach this sub driver from iommu mapping if supported. */
354 if (is_drm_iommu_supported(ctx->drm_dev))
355 drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
356}
357
358static u32 fimd_calc_clkdiv(struct fimd_context *ctx, 351static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
359 const struct drm_display_mode *mode) 352 const struct drm_display_mode *mode)
360{ 353{
@@ -486,9 +479,9 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
486} 479}
487 480
488 481
489static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win) 482static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
483 struct drm_framebuffer *fb)
490{ 484{
491 struct exynos_drm_plane *plane = &ctx->planes[win];
492 unsigned long val; 485 unsigned long val;
493 486
494 val = WINCONx_ENWIN; 487 val = WINCONx_ENWIN;
@@ -498,11 +491,11 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
498 * So if the requested format is ARGB8888, change it to XRGB8888. 491 * So if the requested format is ARGB8888, change it to XRGB8888.
499 */ 492 */
500 if (ctx->driver_data->has_limited_fmt && !win) { 493 if (ctx->driver_data->has_limited_fmt && !win) {
501 if (plane->pixel_format == DRM_FORMAT_ARGB8888) 494 if (fb->pixel_format == DRM_FORMAT_ARGB8888)
502 plane->pixel_format = DRM_FORMAT_XRGB8888; 495 fb->pixel_format = DRM_FORMAT_XRGB8888;
503 } 496 }
504 497
505 switch (plane->pixel_format) { 498 switch (fb->pixel_format) {
506 case DRM_FORMAT_C8: 499 case DRM_FORMAT_C8:
507 val |= WINCON0_BPPMODE_8BPP_PALETTE; 500 val |= WINCON0_BPPMODE_8BPP_PALETTE;
508 val |= WINCONx_BURSTLEN_8WORD; 501 val |= WINCONx_BURSTLEN_8WORD;
@@ -538,7 +531,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
538 break; 531 break;
539 } 532 }
540 533
541 DRM_DEBUG_KMS("bpp = %d\n", plane->bpp); 534 DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
542 535
543 /* 536 /*
544 * In case of exynos, setting dma-burst to 16Word causes permanent 537 * In case of exynos, setting dma-burst to 16Word causes permanent
@@ -548,7 +541,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
548 * movement causes unstable DMA, which results in iommu crash/tear. 541 * movement causes unstable DMA, which results in iommu crash/tear.
549 */ 542 */
550 543
551 if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) { 544 if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
552 val &= ~WINCONx_BURSTLEN_MASK; 545 val &= ~WINCONx_BURSTLEN_MASK;
553 val |= WINCONx_BURSTLEN_4WORD; 546 val |= WINCONx_BURSTLEN_4WORD;
554 } 547 }
@@ -614,21 +607,17 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
614 writel(val, ctx->regs + reg); 607 writel(val, ctx->regs + reg);
615} 608}
616 609
617static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) 610static void fimd_update_plane(struct exynos_drm_crtc *crtc,
611 struct exynos_drm_plane *plane)
618{ 612{
619 struct fimd_context *ctx = crtc->ctx; 613 struct fimd_context *ctx = crtc->ctx;
620 struct exynos_drm_plane *plane; 614 struct drm_plane_state *state = plane->base.state;
621 dma_addr_t dma_addr; 615 dma_addr_t dma_addr;
622 unsigned long val, size, offset; 616 unsigned long val, size, offset;
623 unsigned int last_x, last_y, buf_offsize, line_size; 617 unsigned int last_x, last_y, buf_offsize, line_size;
624 618 unsigned int win = plane->zpos;
625 if (ctx->suspended) 619 unsigned int bpp = state->fb->bits_per_pixel >> 3;
626 return; 620 unsigned int pitch = state->fb->pitches[0];
627
628 if (win < 0 || win >= WINDOWS_NR)
629 return;
630
631 plane = &ctx->planes[win];
632 621
633 if (ctx->suspended) 622 if (ctx->suspended)
634 return; 623 return;
@@ -647,8 +636,8 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
647 fimd_shadow_protect_win(ctx, win, true); 636 fimd_shadow_protect_win(ctx, win, true);
648 637
649 638
650 offset = plane->src_x * (plane->bpp >> 3); 639 offset = plane->src_x * bpp;
651 offset += plane->src_y * plane->pitch; 640 offset += plane->src_y * pitch;
652 641
653 /* buffer start address */ 642 /* buffer start address */
654 dma_addr = plane->dma_addr[0] + offset; 643 dma_addr = plane->dma_addr[0] + offset;
@@ -656,18 +645,18 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
656 writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); 645 writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
657 646
658 /* buffer end address */ 647 /* buffer end address */
659 size = plane->pitch * plane->crtc_height; 648 size = pitch * plane->crtc_h;
660 val = (unsigned long)(dma_addr + size); 649 val = (unsigned long)(dma_addr + size);
661 writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); 650 writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
662 651
663 DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n", 652 DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
664 (unsigned long)dma_addr, val, size); 653 (unsigned long)dma_addr, val, size);
665 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 654 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
666 plane->crtc_width, plane->crtc_height); 655 plane->crtc_w, plane->crtc_h);
667 656
668 /* buffer size */ 657 /* buffer size */
669 buf_offsize = plane->pitch - (plane->crtc_width * (plane->bpp >> 3)); 658 buf_offsize = pitch - (plane->crtc_w * bpp);
670 line_size = plane->crtc_width * (plane->bpp >> 3); 659 line_size = plane->crtc_w * bpp;
671 val = VIDW_BUF_SIZE_OFFSET(buf_offsize) | 660 val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
672 VIDW_BUF_SIZE_PAGEWIDTH(line_size) | 661 VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
673 VIDW_BUF_SIZE_OFFSET_E(buf_offsize) | 662 VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
@@ -681,10 +670,10 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
681 VIDOSDxA_TOPLEFT_Y_E(plane->crtc_y); 670 VIDOSDxA_TOPLEFT_Y_E(plane->crtc_y);
682 writel(val, ctx->regs + VIDOSD_A(win)); 671 writel(val, ctx->regs + VIDOSD_A(win));
683 672
684 last_x = plane->crtc_x + plane->crtc_width; 673 last_x = plane->crtc_x + plane->crtc_w;
685 if (last_x) 674 if (last_x)
686 last_x--; 675 last_x--;
687 last_y = plane->crtc_y + plane->crtc_height; 676 last_y = plane->crtc_y + plane->crtc_h;
688 if (last_y) 677 if (last_y)
689 last_y--; 678 last_y--;
690 679
@@ -701,13 +690,13 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
701 u32 offset = VIDOSD_D(win); 690 u32 offset = VIDOSD_D(win);
702 if (win == 0) 691 if (win == 0)
703 offset = VIDOSD_C(win); 692 offset = VIDOSD_C(win);
704 val = plane->crtc_width * plane->crtc_height; 693 val = plane->crtc_w * plane->crtc_h;
705 writel(val, ctx->regs + offset); 694 writel(val, ctx->regs + offset);
706 695
707 DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val); 696 DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
708 } 697 }
709 698
710 fimd_win_set_pixfmt(ctx, win); 699 fimd_win_set_pixfmt(ctx, win, state->fb);
711 700
712 /* hardware window 0 doesn't support color key. */ 701 /* hardware window 0 doesn't support color key. */
713 if (win != 0) 702 if (win != 0)
@@ -725,15 +714,11 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
725 atomic_set(&ctx->win_updated, 1); 714 atomic_set(&ctx->win_updated, 1);
726} 715}
727 716
728static void fimd_win_disable(struct exynos_drm_crtc *crtc, unsigned int win) 717static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
718 struct exynos_drm_plane *plane)
729{ 719{
730 struct fimd_context *ctx = crtc->ctx; 720 struct fimd_context *ctx = crtc->ctx;
731 struct exynos_drm_plane *plane; 721 unsigned int win = plane->zpos;
732
733 if (win < 0 || win >= WINDOWS_NR)
734 return;
735
736 plane = &ctx->planes[win];
737 722
738 if (ctx->suspended) 723 if (ctx->suspended)
739 return; 724 return;
@@ -795,7 +780,7 @@ static void fimd_disable(struct exynos_drm_crtc *crtc)
795 * a destroyed buffer later. 780 * a destroyed buffer later.
796 */ 781 */
797 for (i = 0; i < WINDOWS_NR; i++) 782 for (i = 0; i < WINDOWS_NR; i++)
798 fimd_win_disable(crtc, i); 783 fimd_disable_plane(crtc, &ctx->planes[i]);
799 784
800 fimd_enable_vblank(crtc); 785 fimd_enable_vblank(crtc);
801 fimd_wait_for_vblank(crtc); 786 fimd_wait_for_vblank(crtc);
@@ -862,7 +847,7 @@ static void fimd_te_handler(struct exynos_drm_crtc *crtc)
862 } 847 }
863 848
864 if (test_bit(0, &ctx->irq_flags)) 849 if (test_bit(0, &ctx->irq_flags))
865 drm_handle_vblank(ctx->drm_dev, ctx->pipe); 850 drm_crtc_handle_vblank(&ctx->crtc->base);
866} 851}
867 852
868static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable) 853static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
@@ -890,11 +875,10 @@ static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
890 .enable_vblank = fimd_enable_vblank, 875 .enable_vblank = fimd_enable_vblank,
891 .disable_vblank = fimd_disable_vblank, 876 .disable_vblank = fimd_disable_vblank,
892 .wait_for_vblank = fimd_wait_for_vblank, 877 .wait_for_vblank = fimd_wait_for_vblank,
893 .win_commit = fimd_win_commit, 878 .update_plane = fimd_update_plane,
894 .win_disable = fimd_win_disable, 879 .disable_plane = fimd_disable_plane,
895 .te_handler = fimd_te_handler, 880 .te_handler = fimd_te_handler,
896 .clock_enable = fimd_dp_clock_enable, 881 .clock_enable = fimd_dp_clock_enable,
897 .clear_channels = fimd_clear_channels,
898}; 882};
899 883
900static irqreturn_t fimd_irq_handler(int irq, void *dev_id) 884static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
@@ -913,13 +897,13 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
913 goto out; 897 goto out;
914 898
915 if (ctx->i80_if) { 899 if (ctx->i80_if) {
916 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); 900 exynos_drm_crtc_finish_pageflip(ctx->crtc);
917 901
918 /* Exits triggering mode */ 902 /* Exits triggering mode */
919 atomic_set(&ctx->triggering, 0); 903 atomic_set(&ctx->triggering, 0);
920 } else { 904 } else {
921 drm_handle_vblank(ctx->drm_dev, ctx->pipe); 905 drm_crtc_handle_vblank(&ctx->crtc->base);
922 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); 906 exynos_drm_crtc_finish_pageflip(ctx->crtc);
923 907
924 /* set wait vsync event to zero and wake up queue. */ 908 /* set wait vsync event to zero and wake up queue. */
925 if (atomic_read(&ctx->wait_vsync_event)) { 909 if (atomic_read(&ctx->wait_vsync_event)) {
@@ -961,10 +945,13 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
961 if (IS_ERR(ctx->crtc)) 945 if (IS_ERR(ctx->crtc))
962 return PTR_ERR(ctx->crtc); 946 return PTR_ERR(ctx->crtc);
963 947
964 if (ctx->display) 948 if (ctx->encoder)
965 exynos_drm_create_enc_conn(drm_dev, ctx->display); 949 exynos_dpi_bind(drm_dev, ctx->encoder);
950
951 if (is_drm_iommu_supported(drm_dev))
952 fimd_clear_channels(ctx->crtc);
966 953
967 ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev); 954 ret = drm_iommu_attach_device(drm_dev, dev);
968 if (ret) 955 if (ret)
969 priv->pipe--; 956 priv->pipe--;
970 957
@@ -978,10 +965,10 @@ static void fimd_unbind(struct device *dev, struct device *master,
978 965
979 fimd_disable(ctx->crtc); 966 fimd_disable(ctx->crtc);
980 967
981 fimd_iommu_detach_devices(ctx); 968 drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
982 969
983 if (ctx->display) 970 if (ctx->encoder)
984 exynos_dpi_remove(ctx->display); 971 exynos_dpi_remove(ctx->encoder);
985} 972}
986 973
987static const struct component_ops fimd_component_ops = { 974static const struct component_ops fimd_component_ops = {
@@ -1088,10 +1075,9 @@ static int fimd_probe(struct platform_device *pdev)
1088 1075
1089 platform_set_drvdata(pdev, ctx); 1076 platform_set_drvdata(pdev, ctx);
1090 1077
1091 ctx->display = exynos_dpi_probe(dev); 1078 ctx->encoder = exynos_dpi_probe(dev);
1092 if (IS_ERR(ctx->display)) { 1079 if (IS_ERR(ctx->encoder))
1093 return PTR_ERR(ctx->display); 1080 return PTR_ERR(ctx->encoder);
1094 }
1095 1081
1096 pm_runtime_enable(dev); 1082 pm_runtime_enable(dev);
1097 1083
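
fimd_update_plane() above now derives its programming from plane and framebuffer state instead of fields cached in exynos_drm_plane. The register arithmetic reduces to a few lines, sketched here with the hunk's names (a sketch, not a complete driver function):

static void sketch_scanout_window(const struct exynos_drm_plane *plane,
				  const struct drm_framebuffer *fb,
				  dma_addr_t *start, unsigned long *pad,
				  unsigned long *line)
{
	unsigned int bpp = fb->bits_per_pixel >> 3;	/* bytes per pixel */
	unsigned int pitch = fb->pitches[0];		/* bytes per fb line */
	unsigned long offset = plane->src_x * bpp + plane->src_y * pitch;

	*start = plane->dma_addr[0] + offset;	/* VIDWx_BUF_START */
	*line  = plane->crtc_w * bpp;		/* VIDW_BUF_SIZE_PAGEWIDTH */
	*pad   = pitch - *line;			/* VIDW_BUF_SIZE_OFFSET */
}
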
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 81a250830808..ba008391a2fc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1319,9 +1319,6 @@ static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1319 return ret; 1319 return ret;
1320 } 1320 }
1321 1321
1322 if (!is_drm_iommu_supported(drm_dev))
1323 return 0;
1324
1325 ret = drm_iommu_attach_device(drm_dev, dev); 1322 ret = drm_iommu_attach_device(drm_dev, dev);
1326 if (ret < 0) { 1323 if (ret < 0) {
1327 dev_err(dev, "failed to enable iommu.\n"); 1324 dev_err(dev, "failed to enable iommu.\n");
@@ -1334,9 +1331,6 @@ static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1334 1331
1335static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev) 1332static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1336{ 1333{
1337 if (!is_drm_iommu_supported(drm_dev))
1338 return;
1339
1340 drm_iommu_detach_device(drm_dev, dev); 1334 drm_iommu_detach_device(drm_dev, dev);
1341} 1335}
1342 1336
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 0d5b9698d384..67461b77f040 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -13,98 +13,112 @@
13#include <drm/drm_vma_manager.h> 13#include <drm/drm_vma_manager.h>
14 14
15#include <linux/shmem_fs.h> 15#include <linux/shmem_fs.h>
16#include <linux/dma-buf.h>
16#include <drm/exynos_drm.h> 17#include <drm/exynos_drm.h>
17 18
18#include "exynos_drm_drv.h" 19#include "exynos_drm_drv.h"
19#include "exynos_drm_gem.h" 20#include "exynos_drm_gem.h"
20#include "exynos_drm_buf.h"
21#include "exynos_drm_iommu.h" 21#include "exynos_drm_iommu.h"
22 22
23static unsigned int convert_to_vm_err_msg(int msg) 23static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
24{ 24{
25 unsigned int out_msg; 25 struct drm_device *dev = obj->base.dev;
26 enum dma_attr attr;
27 unsigned int nr_pages;
26 28
27 switch (msg) { 29 if (obj->dma_addr) {
28 case 0: 30 DRM_DEBUG_KMS("already allocated.\n");
29 case -ERESTARTSYS: 31 return 0;
30 case -EINTR: 32 }
31 out_msg = VM_FAULT_NOPAGE;
32 break;
33 33
34 case -ENOMEM: 34 init_dma_attrs(&obj->dma_attrs);
35 out_msg = VM_FAULT_OOM;
36 break;
37 35
38 default: 36 /*
39 out_msg = VM_FAULT_SIGBUS; 37 * if EXYNOS_BO_CONTIG, a fully physically contiguous
40 break; 38 * region is allocated; otherwise the allocation is made
41 } 39 * as contiguous as possible.
40 */
41 if (!(obj->flags & EXYNOS_BO_NONCONTIG))
42 dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);
42 43
43 return out_msg; 44 /*
44} 45 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combined
46 * mapping; otherwise a cacheable mapping.
47 */
48 if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
49 attr = DMA_ATTR_WRITE_COMBINE;
50 else
51 attr = DMA_ATTR_NON_CONSISTENT;
45 52
46static int check_gem_flags(unsigned int flags) 53 dma_set_attr(attr, &obj->dma_attrs);
47{ 54 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);
48 if (flags & ~(EXYNOS_BO_MASK)) {
49 DRM_ERROR("invalid flags.\n");
50 return -EINVAL;
51 }
52 55
53 return 0; 56 nr_pages = obj->size >> PAGE_SHIFT;
54}
55 57
56static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj, 58 if (!is_drm_iommu_supported(dev)) {
57 struct vm_area_struct *vma) 59 dma_addr_t start_addr;
58{ 60 unsigned int i = 0;
59 DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
60 61
61 /* non-cachable as default. */ 62 obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
62 if (obj->flags & EXYNOS_BO_CACHABLE) 63 if (!obj->pages) {
63 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 64 DRM_ERROR("failed to allocate pages.\n");
64 else if (obj->flags & EXYNOS_BO_WC) 65 return -ENOMEM;
65 vma->vm_page_prot = 66 }
66 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
67 else
68 vma->vm_page_prot =
69 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
70}
71 67
72static unsigned long roundup_gem_size(unsigned long size, unsigned int flags) 68 obj->cookie = dma_alloc_attrs(dev->dev,
73{ 69 obj->size,
74 /* TODO */ 70 &obj->dma_addr, GFP_KERNEL,
71 &obj->dma_attrs);
72 if (!obj->cookie) {
73 DRM_ERROR("failed to allocate buffer.\n");
74 drm_free_large(obj->pages);
75 return -ENOMEM;
76 }
77
78 start_addr = obj->dma_addr;
79 while (i < nr_pages) {
80 obj->pages[i] = phys_to_page(start_addr);
81 start_addr += PAGE_SIZE;
82 i++;
83 }
84 } else {
85 obj->pages = dma_alloc_attrs(dev->dev, obj->size,
86 &obj->dma_addr, GFP_KERNEL,
87 &obj->dma_attrs);
88 if (!obj->pages) {
89 DRM_ERROR("failed to allocate buffer.\n");
90 return -ENOMEM;
91 }
92 }
93
94 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
95 (unsigned long)obj->dma_addr,
96 obj->size);
75 97
76 return roundup(size, PAGE_SIZE); 98 return 0;
77} 99}
78 100
79static int exynos_drm_gem_map_buf(struct drm_gem_object *obj, 101static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
80 struct vm_area_struct *vma,
81 unsigned long f_vaddr,
82 pgoff_t page_offset)
83{ 102{
84 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 103 struct drm_device *dev = obj->base.dev;
85 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
86 struct scatterlist *sgl;
87 unsigned long pfn;
88 int i;
89
90 if (!buf->sgt)
91 return -EINTR;
92 104
93 if (page_offset >= (buf->size >> PAGE_SHIFT)) { 105 if (!obj->dma_addr) {
94 DRM_ERROR("invalid page offset\n"); 106 DRM_DEBUG_KMS("dma_addr is invalid.\n");
95 return -EINVAL; 107 return;
96 } 108 }
97 109
98 sgl = buf->sgt->sgl; 110 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
99 for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) { 111 (unsigned long)obj->dma_addr, obj->size);
100 if (page_offset < (sgl->length >> PAGE_SHIFT))
101 break;
102 page_offset -= (sgl->length >> PAGE_SHIFT);
103 }
104 112
105 pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset; 113 if (!is_drm_iommu_supported(dev)) {
114 dma_free_attrs(dev->dev, obj->size, obj->cookie,
115 (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
116 drm_free_large(obj->pages);
117 } else
118 dma_free_attrs(dev->dev, obj->size, obj->pages,
119 (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
106 120
107 return vm_insert_mixed(vma, f_vaddr, pfn); 121 obj->dma_addr = (dma_addr_t)NULL;
108} 122}
109 123
110static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, 124static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -131,11 +145,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
131 145
132void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) 146void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
133{ 147{
134 struct drm_gem_object *obj; 148 struct drm_gem_object *obj = &exynos_gem_obj->base;
135 struct exynos_drm_gem_buf *buf;
136
137 obj = &exynos_gem_obj->base;
138 buf = exynos_gem_obj->buffer;
139 149
140 DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count); 150 DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
141 151
@@ -148,12 +158,9 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
148 if (obj->import_attach) 158 if (obj->import_attach)
149 goto out; 159 goto out;
150 160
151 exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf); 161 exynos_drm_free_buf(exynos_gem_obj);
152 162
153out: 163out:
154 exynos_drm_fini_buf(obj->dev, buf);
155 exynos_gem_obj->buffer = NULL;
156
157 drm_gem_free_mmap_offset(obj); 164 drm_gem_free_mmap_offset(obj);
158 165
159 /* release file pointer to gem object. */ 166 /* release file pointer to gem object. */
@@ -180,7 +187,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
180 187
181 drm_gem_object_unreference_unlocked(obj); 188 drm_gem_object_unreference_unlocked(obj);
182 189
183 return exynos_gem_obj->buffer->size; 190 return exynos_gem_obj->size;
184} 191}
185 192
186 193
@@ -193,7 +200,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
193 200
194 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); 201 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
195 if (!exynos_gem_obj) 202 if (!exynos_gem_obj)
196 return NULL; 203 return ERR_PTR(-ENOMEM);
197 204
198 exynos_gem_obj->size = size; 205 exynos_gem_obj->size = size;
199 obj = &exynos_gem_obj->base; 206 obj = &exynos_gem_obj->base;
@@ -202,7 +209,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
202 if (ret < 0) { 209 if (ret < 0) {
203 DRM_ERROR("failed to initialize gem object\n"); 210 DRM_ERROR("failed to initialize gem object\n");
204 kfree(exynos_gem_obj); 211 kfree(exynos_gem_obj);
205 return NULL; 212 return ERR_PTR(ret);
206 } 213 }
207 214
208 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); 215 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -215,47 +222,35 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
215 unsigned long size) 222 unsigned long size)
216{ 223{
217 struct exynos_drm_gem_obj *exynos_gem_obj; 224 struct exynos_drm_gem_obj *exynos_gem_obj;
218 struct exynos_drm_gem_buf *buf;
219 int ret; 225 int ret;
220 226
227 if (flags & ~(EXYNOS_BO_MASK)) {
228 DRM_ERROR("invalid flags.\n");
229 return ERR_PTR(-EINVAL);
230 }
231
221 if (!size) { 232 if (!size) {
222 DRM_ERROR("invalid size.\n"); 233 DRM_ERROR("invalid size.\n");
223 return ERR_PTR(-EINVAL); 234 return ERR_PTR(-EINVAL);
224 } 235 }
225 236
226 size = roundup_gem_size(size, flags); 237 size = roundup(size, PAGE_SIZE);
227
228 ret = check_gem_flags(flags);
229 if (ret)
230 return ERR_PTR(ret);
231
232 buf = exynos_drm_init_buf(dev, size);
233 if (!buf)
234 return ERR_PTR(-ENOMEM);
235 238
236 exynos_gem_obj = exynos_drm_gem_init(dev, size); 239 exynos_gem_obj = exynos_drm_gem_init(dev, size);
237 if (!exynos_gem_obj) { 240 if (IS_ERR(exynos_gem_obj))
238 ret = -ENOMEM; 241 return exynos_gem_obj;
239 goto err_fini_buf;
240 }
241
242 exynos_gem_obj->buffer = buf;
243 242
244 /* set memory type and cache attribute from user side. */ 243 /* set memory type and cache attribute from user side. */
245 exynos_gem_obj->flags = flags; 244 exynos_gem_obj->flags = flags;
246 245
247 ret = exynos_drm_alloc_buf(dev, buf, flags); 246 ret = exynos_drm_alloc_buf(exynos_gem_obj);
248 if (ret < 0) 247 if (ret < 0) {
249 goto err_gem_fini; 248 drm_gem_object_release(&exynos_gem_obj->base);
249 kfree(exynos_gem_obj);
250 return ERR_PTR(ret);
251 }
250 252
251 return exynos_gem_obj; 253 return exynos_gem_obj;
252
253err_gem_fini:
254 drm_gem_object_release(&exynos_gem_obj->base);
255 kfree(exynos_gem_obj);
256err_fini_buf:
257 exynos_drm_fini_buf(dev, buf);
258 return ERR_PTR(ret);
259} 254}
260 255
261int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, 256int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -294,7 +289,7 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
294 289
295 exynos_gem_obj = to_exynos_gem_obj(obj); 290 exynos_gem_obj = to_exynos_gem_obj(obj);
296 291
297 return &exynos_gem_obj->buffer->dma_addr; 292 return &exynos_gem_obj->dma_addr;
298} 293}
299 294
300void exynos_drm_gem_put_dma_addr(struct drm_device *dev, 295void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
@@ -322,7 +317,6 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
322 struct vm_area_struct *vma) 317 struct vm_area_struct *vma)
323{ 318{
324 struct drm_device *drm_dev = exynos_gem_obj->base.dev; 319 struct drm_device *drm_dev = exynos_gem_obj->base.dev;
325 struct exynos_drm_gem_buf *buffer;
326 unsigned long vm_size; 320 unsigned long vm_size;
327 int ret; 321 int ret;
328 322
@@ -331,19 +325,13 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
331 325
332 vm_size = vma->vm_end - vma->vm_start; 326 vm_size = vma->vm_end - vma->vm_start;
333 327
334 /*
335 * a buffer holds information about physically contiguous memory
336 * allocated by user request or at framebuffer creation.
337 */
338 buffer = exynos_gem_obj->buffer;
339
340 /* check if user-requested size is valid. */ 328 /* check if user-requested size is valid. */
341 if (vm_size > buffer->size) 329 if (vm_size > exynos_gem_obj->size)
342 return -EINVAL; 330 return -EINVAL;
343 331
344 ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages, 332 ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
345 buffer->dma_addr, buffer->size, 333 exynos_gem_obj->dma_addr, exynos_gem_obj->size,
346 &buffer->dma_attrs); 334 &exynos_gem_obj->dma_attrs);
347 if (ret < 0) { 335 if (ret < 0) {
348 DRM_ERROR("failed to mmap.\n"); 336 DRM_ERROR("failed to mmap.\n");
349 return ret; 337 return ret;
@@ -503,15 +491,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
503 491
504void exynos_drm_gem_free_object(struct drm_gem_object *obj) 492void exynos_drm_gem_free_object(struct drm_gem_object *obj)
505{ 493{
506 struct exynos_drm_gem_obj *exynos_gem_obj;
507 struct exynos_drm_gem_buf *buf;
508
509 exynos_gem_obj = to_exynos_gem_obj(obj);
510 buf = exynos_gem_obj->buffer;
511
512 if (obj->import_attach)
513 drm_prime_gem_destroy(obj, buf->sgt);
514
515 exynos_drm_gem_destroy(to_exynos_gem_obj(obj)); 494 exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
516} 495}
517 496
@@ -595,24 +574,34 @@ unlock:
595int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 574int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
596{ 575{
597 struct drm_gem_object *obj = vma->vm_private_data; 576 struct drm_gem_object *obj = vma->vm_private_data;
598 struct drm_device *dev = obj->dev; 577 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
599 unsigned long f_vaddr; 578 unsigned long pfn;
600 pgoff_t page_offset; 579 pgoff_t page_offset;
601 int ret; 580 int ret;
602 581
603 page_offset = ((unsigned long)vmf->virtual_address - 582 page_offset = ((unsigned long)vmf->virtual_address -
604 vma->vm_start) >> PAGE_SHIFT; 583 vma->vm_start) >> PAGE_SHIFT;
605 f_vaddr = (unsigned long)vmf->virtual_address;
606
607 mutex_lock(&dev->struct_mutex);
608 584
609 ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset); 585 if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
610 if (ret < 0) 586 DRM_ERROR("invalid page offset\n");
611 DRM_ERROR("failed to map a buffer with user.\n"); 587 ret = -EINVAL;
588 goto out;
589 }
612 590
613 mutex_unlock(&dev->struct_mutex); 591 pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
592 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
614 593
615 return convert_to_vm_err_msg(ret); 594out:
595 switch (ret) {
596 case 0:
597 case -ERESTARTSYS:
598 case -EINTR:
599 return VM_FAULT_NOPAGE;
600 case -ENOMEM:
601 return VM_FAULT_OOM;
602 default:
603 return VM_FAULT_SIGBUS;
604 }
616} 605}
617 606
618int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 607int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -631,11 +620,17 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
631 obj = vma->vm_private_data; 620 obj = vma->vm_private_data;
632 exynos_gem_obj = to_exynos_gem_obj(obj); 621 exynos_gem_obj = to_exynos_gem_obj(obj);
633 622
634 ret = check_gem_flags(exynos_gem_obj->flags); 623 DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags);
635 if (ret)
636 goto err_close_vm;
637 624
638 update_vm_cache_attr(exynos_gem_obj, vma); 625 /* non-cachable as default. */
626 if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE)
627 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
628 else if (exynos_gem_obj->flags & EXYNOS_BO_WC)
629 vma->vm_page_prot =
630 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
631 else
632 vma->vm_page_prot =
633 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
639 634
640 ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma); 635 ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
641 if (ret) 636 if (ret)
@@ -649,3 +644,76 @@ err_close_vm:
649 644
650 return ret; 645 return ret;
651} 646}
647
648/* low-level interface prime helpers */
649struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
650{
651 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
652 int npages;
653
654 npages = exynos_gem_obj->size >> PAGE_SHIFT;
655
656 return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
657}
658
659struct drm_gem_object *
660exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
661 struct dma_buf_attachment *attach,
662 struct sg_table *sgt)
663{
664 struct exynos_drm_gem_obj *exynos_gem_obj;
665 int npages;
666 int ret;
667
668 exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
669 if (IS_ERR(exynos_gem_obj)) {
670 ret = PTR_ERR(exynos_gem_obj);
671 goto err;
672 }
673
674 exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);
675
676 npages = exynos_gem_obj->size >> PAGE_SHIFT;
677 exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
678 if (!exynos_gem_obj->pages) {
679 ret = -ENOMEM;
680 goto err;
681 }
682
683 ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
684 npages);
685 if (ret < 0)
686 goto err_free_large;
687
688 if (sgt->nents == 1) {
689 /* always physically contiguous memory if sgt->nents is 1. */
690 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
691 } else {
692 /*
693 * this case could be either CONTIG or NONCONTIG, but for now
694 * assume NONCONTIG.
695 * TODO: find a way for the exporter to notify the importer
696 * of its buffer type.
697 */
698 exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
699 }
700
701 return &exynos_gem_obj->base;
702
703err_free_large:
704 drm_free_large(exynos_gem_obj->pages);
705err:
706 drm_gem_object_release(&exynos_gem_obj->base);
707 kfree(exynos_gem_obj);
708 return ERR_PTR(ret);
709}
710
711void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
712{
713 return NULL;
714}
715
716void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
717{
718 /* Nothing to do */
719}
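
The GEM rework above folds the old exynos_drm_gem_buf into the object and leans on two DMA attributes: DMA_ATTR_FORCE_CONTIGUOUS for EXYNOS_BO_CONTIG buffers, and DMA_ATTR_NO_KERNEL_MAPPING, under which dma_alloc_attrs() returns an opaque cookie rather than a usable kernel virtual address. A trimmed sketch of that contract (4.2-era struct dma_attrs API):

#include <linux/dma-mapping.h>

static int sketch_alloc_wc(struct device *dev, size_t size)
{
	struct dma_attrs attrs;
	dma_addr_t dma_addr;
	void *cookie;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	/* the return value is only a token for dma_free_attrs(); CPU
	 * access goes through the pages array, never through cookie */
	cookie = dma_alloc_attrs(dev, size, &dma_addr, GFP_KERNEL, &attrs);
	if (!cookie)
		return -ENOMEM;

	dma_free_attrs(dev, size, cookie, dma_addr, &attrs);
	return 0;
}
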
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 6f42e2248288..cd62f8410d1e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -20,35 +20,6 @@
20#define IS_NONCONTIG_BUFFER(f) (f & EXYNOS_BO_NONCONTIG) 20#define IS_NONCONTIG_BUFFER(f) (f & EXYNOS_BO_NONCONTIG)
21 21
22/* 22/*
23 * exynos drm gem buffer structure.
24 *
25 * @cookie: cookie returned by dma_alloc_attrs
26 * @kvaddr: kernel virtual address to allocated memory region.
27 * @userptr: user space address.
28 * @dma_addr: bus address(accessed by dma) to allocated memory region.
29 * - this address could be physical address without IOMMU and
30 * device address with IOMMU.
31 * @write: whether pages will be written to by the caller.
32 * @pages: Array of backing pages.
33 * @sgt: sg table to transfer page data.
34 * @size: size of allocated memory region.
35 * @pfnmap: indicate whether memory region from userptr is mmaped with
36 * VM_PFNMAP or not.
37 */
38struct exynos_drm_gem_buf {
39 void *cookie;
40 void __iomem *kvaddr;
41 unsigned long userptr;
42 dma_addr_t dma_addr;
43 struct dma_attrs dma_attrs;
44 unsigned int write;
45 struct page **pages;
46 struct sg_table *sgt;
47 unsigned long size;
48 bool pfnmap;
49};
50
51/*
52 * exynos drm buffer structure. 23 * exynos drm buffer structure.
53 * 24 *
54 * @base: a gem object. 25 * @base: a gem object.
@@ -59,18 +30,28 @@ struct exynos_drm_gem_buf {
59 * by user request or at framebuffer creation. 30 * by user request or at framebuffer creation.
60 * contiguous memory region allocated by user request 31 * contiguous memory region allocated by user request
61 * or at framebuffer creation. 32 * or at framebuffer creation.
33 * @flags: indicates the memory type and cache attribute of the allocated buffer.
62 * @size: size requested from user, in bytes; this size is 34 * @size: size requested from user, in bytes; this size is
63 * page-aligned. 35 * page-aligned.
64 * @flags: indicates the memory type and cache attribute of the allocated buffer. 36 * @cookie: cookie returned by dma_alloc_attrs
37 * @kvaddr: kernel virtual address of the allocated memory region.
38 * @dma_addr: bus address (accessed by DMA) of the allocated memory region.
39 * - this is a physical address without an IOMMU and
40 * a device address with an IOMMU.
41 * @pages: Array of backing pages.
65 * 42 *
66 * P.S. this object would be transferred to user as kms_bo.handle so 43 * P.S. this object would be transferred to user as kms_bo.handle so
67 * user can access the buffer through kms_bo.handle. 44 * user can access the buffer through kms_bo.handle.
68 */ 45 */
69struct exynos_drm_gem_obj { 46struct exynos_drm_gem_obj {
70 struct drm_gem_object base; 47 struct drm_gem_object base;
71 struct exynos_drm_gem_buf *buffer; 48 unsigned int flags;
72 unsigned long size; 49 unsigned long size;
73 unsigned int flags; 50 void *cookie;
51 void __iomem *kvaddr;
52 dma_addr_t dma_addr;
53 struct dma_attrs dma_attrs;
54 struct page **pages;
74}; 55};
75 56
76struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); 57struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
@@ -177,4 +158,13 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
177 struct sg_table *sgt, 158 struct sg_table *sgt,
178 enum dma_data_direction dir); 159 enum dma_data_direction dir);
179 160
161/* low-level interface prime helpers */
162struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj);
163struct drm_gem_object *
164exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
165 struct dma_buf_attachment *attach,
166 struct sg_table *sgt);
167void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj);
168void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
169
180#endif 170#endif
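
The low-level prime helpers declared above plug into struct drm_driver; an illustrative hookup (the real assignments live in exynos_drm_drv.c, outside this diff):

static struct drm_driver sketch_driver = {
	/* generic prime plumbing provided by drm core */
	.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
	.gem_prime_export	   = drm_gem_prime_export,
	.gem_prime_import	   = drm_gem_prime_import,
	/* driver-specific low-level helpers from this header */
	.gem_prime_get_sg_table	   = exynos_drm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
	.gem_prime_vmap		   = exynos_drm_gem_prime_vmap,
	.gem_prime_vunmap	   = exynos_drm_gem_prime_vunmap,
};
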
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 8040ed2a831f..808a0a013780 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -582,9 +582,17 @@ static int gsc_src_set_transf(struct device *dev,
582 break; 582 break;
583 case EXYNOS_DRM_DEGREE_180: 583 case EXYNOS_DRM_DEGREE_180:
584 cfg |= GSC_IN_ROT_180; 584 cfg |= GSC_IN_ROT_180;
585 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
586 cfg &= ~GSC_IN_ROT_XFLIP;
587 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
588 cfg &= ~GSC_IN_ROT_YFLIP;
585 break; 589 break;
586 case EXYNOS_DRM_DEGREE_270: 590 case EXYNOS_DRM_DEGREE_270:
587 cfg |= GSC_IN_ROT_270; 591 cfg |= GSC_IN_ROT_270;
592 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
593 cfg &= ~GSC_IN_ROT_XFLIP;
594 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
595 cfg &= ~GSC_IN_ROT_YFLIP;
588 break; 596 break;
589 default: 597 default:
590 dev_err(ippdrv->dev, "invalid degree value %d.\n", degree); 598 dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
@@ -593,8 +601,7 @@ static int gsc_src_set_transf(struct device *dev,
593 601
594 gsc_write(cfg, GSC_IN_CON); 602 gsc_write(cfg, GSC_IN_CON);
595 603
596 ctx->rotation = cfg & 604 ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
597 (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
598 *swap = ctx->rotation; 605 *swap = ctx->rotation;
599 606
600 return 0; 607 return 0;
@@ -846,9 +853,17 @@ static int gsc_dst_set_transf(struct device *dev,
846 break; 853 break;
847 case EXYNOS_DRM_DEGREE_180: 854 case EXYNOS_DRM_DEGREE_180:
848 cfg |= GSC_IN_ROT_180; 855 cfg |= GSC_IN_ROT_180;
856 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
857 cfg &= ~GSC_IN_ROT_XFLIP;
858 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
859 cfg &= ~GSC_IN_ROT_YFLIP;
849 break; 860 break;
850 case EXYNOS_DRM_DEGREE_270: 861 case EXYNOS_DRM_DEGREE_270:
851 cfg |= GSC_IN_ROT_270; 862 cfg |= GSC_IN_ROT_270;
863 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
864 cfg &= ~GSC_IN_ROT_XFLIP;
865 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
866 cfg &= ~GSC_IN_ROT_YFLIP;
852 break; 867 break;
853 default: 868 default:
 854 dev_err(ippdrv->dev, "invalid degree value %d.\n", degree); 869 dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
@@ -857,8 +872,7 @@ static int gsc_dst_set_transf(struct device *dev,
857 872
858 gsc_write(cfg, GSC_IN_CON); 873 gsc_write(cfg, GSC_IN_CON);
859 874
860 ctx->rotation = cfg & 875 ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
861 (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
862 *swap = ctx->rotation; 876 *swap = ctx->rotation;
863 877
864 return 0; 878 return 0;
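
The two single-line fixes at the tail of these hunks correct swap detection: '?:' binds more loosely than '&', so the old expression was really (cfg & (GSC_IN_ROT_90 | GSC_IN_ROT_270)) ? 1 : 0, which also fires for 180-degree rotation. A self-contained demonstration, assuming the two-bit field encoding implied by the fix (90 = 01b, 180 = 10b, 270 = 11b; illustrative, not the literal regs-gsc.h values):

#define ROT_90	(1 << 16)	/* assumed encoding */
#define ROT_180	(2 << 16)
#define ROT_270	(3 << 16)	/* == ROT_90 | ROT_180 */

static int swap_old(unsigned int cfg)
{
	/* ROT_90 | ROT_270 covers the whole field, so any rotation,
	 * including 180, reports a width/height swap. */
	return cfg & (ROT_90 | ROT_270) ? 1 : 0;
}

static int swap_new(unsigned int cfg)
{
	/* The 90 bit is set for exactly 90 and 270, the two rotations
	 * that really swap width and height. */
	return (cfg & ROT_90) ? 1 : 0;
}

/* swap_old(ROT_180) == 1 (wrong), swap_new(ROT_180) == 0 (correct). */
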
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index d4ec7465e9cc..055e8ec2ef21 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -87,10 +87,8 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
87 struct device *dev = drm_dev->dev; 87 struct device *dev = drm_dev->dev;
88 int ret; 88 int ret;
89 89
90 if (!dev->archdata.mapping) { 90 if (!dev->archdata.mapping)
91 DRM_ERROR("iommu_mapping is null.\n"); 91 return 0;
92 return -EFAULT;
93 }
94 92
95 subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev, 93 subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
96 sizeof(*subdrv_dev->dma_parms), 94 sizeof(*subdrv_dev->dma_parms),
@@ -144,17 +142,3 @@ void drm_iommu_detach_device(struct drm_device *drm_dev,
144 iommu_detach_device(mapping->domain, subdrv_dev); 142 iommu_detach_device(mapping->domain, subdrv_dev);
145 drm_release_iommu_mapping(drm_dev); 143 drm_release_iommu_mapping(drm_dev);
146} 144}
147
148int drm_iommu_attach_device_if_possible(struct exynos_drm_crtc *exynos_crtc,
149 struct drm_device *drm_dev, struct device *subdrv_dev)
150{
151 int ret = 0;
152
153 if (is_drm_iommu_supported(drm_dev)) {
154 if (exynos_crtc->ops->clear_channels)
155 exynos_crtc->ops->clear_channels(exynos_crtc);
156 return drm_iommu_attach_device(drm_dev, subdrv_dev);
157 }
158
159 return ret;
160}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 8341c7a475b4..dc1b5441f491 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -29,19 +29,11 @@ void drm_iommu_detach_device(struct drm_device *dev_dev,
29 29
30static inline bool is_drm_iommu_supported(struct drm_device *drm_dev) 30static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
31{ 31{
32#ifdef CONFIG_ARM_DMA_USE_IOMMU
33 struct device *dev = drm_dev->dev; 32 struct device *dev = drm_dev->dev;
34 33
35 return dev->archdata.mapping ? true : false; 34 return dev->archdata.mapping ? true : false;
36#else
37 return false;
38#endif
39} 35}
40 36
41int drm_iommu_attach_device_if_possible(
42 struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
43 struct device *subdrv_dev);
44
45#else 37#else
46 38
47static inline int drm_create_iommu_mapping(struct drm_device *drm_dev) 39static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
@@ -69,12 +61,5 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
69 return false; 61 return false;
70} 62}
71 63
72static inline int drm_iommu_attach_device_if_possible(
73 struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
74 struct device *subdrv_dev)
75{
76 return 0;
77}
78
79#endif 64#endif
80#endif 65#endif
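
Taken together, the IOMMU changes mean callers need no capability probing: is_drm_iommu_supported() loses its inner CONFIG_ARM_DMA_USE_IOMMU special case (presumably safe because the enclosing CONFIG_DRM_EXYNOS_IOMMU block already implies it), and drm_iommu_attach_device() now returns 0 as a no-op when no mapping exists. The resulting caller shape, sketched:

static int example_subdrv_probe(struct drm_device *drm_dev,
				struct device *subdrv_dev)
{
	int ret;

	/* No is_drm_iommu_supported() guard needed any more: without a
	 * mapping, attach succeeds as a no-op. */
	ret = drm_iommu_attach_device(drm_dev, subdrv_dev);
	if (ret)
		DRM_ERROR("failed to attach iommu: %d\n", ret);

	return ret;
}
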
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 67e5451e066f..67d24236e745 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -1622,12 +1622,10 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1622 INIT_LIST_HEAD(&ippdrv->cmd_list); 1622 INIT_LIST_HEAD(&ippdrv->cmd_list);
1623 mutex_init(&ippdrv->cmd_lock); 1623 mutex_init(&ippdrv->cmd_lock);
1624 1624
1625 if (is_drm_iommu_supported(drm_dev)) { 1625 ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
1626 ret = drm_iommu_attach_device(drm_dev, ippdrv->dev); 1626 if (ret) {
1627 if (ret) { 1627 DRM_ERROR("failed to activate iommu\n");
1628 DRM_ERROR("failed to activate iommu\n"); 1628 goto err;
1629 goto err;
1630 }
1631 } 1629 }
1632 } 1630 }
1633 1631
@@ -1637,8 +1635,7 @@ err:
1637 /* get ipp driver entry */ 1635 /* get ipp driver entry */
1638 list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list, 1636 list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
1639 drv_list) { 1637 drv_list) {
1640 if (is_drm_iommu_supported(drm_dev)) 1638 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1641 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1642 1639
1643 ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock, 1640 ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
1644 ippdrv->prop_list.ipp_id); 1641 ippdrv->prop_list.ipp_id);
@@ -1654,8 +1651,7 @@ static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1654 1651
1655 /* get ipp driver entry */ 1652 /* get ipp driver entry */
1656 list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) { 1653 list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
1657 if (is_drm_iommu_supported(drm_dev)) 1654 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1658 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1659 1655
1660 ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock, 1656 ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
1661 ippdrv->prop_list.ipp_id); 1657 ippdrv->prop_list.ipp_id);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index a729980d3c2f..d9a68fd83120 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -97,29 +97,18 @@ static void exynos_plane_mode_set(struct drm_plane *plane,
97 /* set drm framebuffer data. */ 97 /* set drm framebuffer data. */
98 exynos_plane->src_x = src_x; 98 exynos_plane->src_x = src_x;
99 exynos_plane->src_y = src_y; 99 exynos_plane->src_y = src_y;
100 exynos_plane->src_width = (actual_w * exynos_plane->h_ratio) >> 16; 100 exynos_plane->src_w = (actual_w * exynos_plane->h_ratio) >> 16;
101 exynos_plane->src_height = (actual_h * exynos_plane->v_ratio) >> 16; 101 exynos_plane->src_h = (actual_h * exynos_plane->v_ratio) >> 16;
102 exynos_plane->fb_width = fb->width;
103 exynos_plane->fb_height = fb->height;
104 exynos_plane->bpp = fb->bits_per_pixel;
105 exynos_plane->pitch = fb->pitches[0];
106 exynos_plane->pixel_format = fb->pixel_format;
107 102
108 /* set plane range to be displayed. */ 103 /* set plane range to be displayed. */
109 exynos_plane->crtc_x = crtc_x; 104 exynos_plane->crtc_x = crtc_x;
110 exynos_plane->crtc_y = crtc_y; 105 exynos_plane->crtc_y = crtc_y;
111 exynos_plane->crtc_width = actual_w; 106 exynos_plane->crtc_w = actual_w;
112 exynos_plane->crtc_height = actual_h; 107 exynos_plane->crtc_h = actual_h;
113
114 /* set drm mode data. */
115 exynos_plane->mode_width = mode->hdisplay;
116 exynos_plane->mode_height = mode->vdisplay;
117 exynos_plane->refresh = mode->vrefresh;
118 exynos_plane->scan_flag = mode->flags;
119 108
120 DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)", 109 DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)",
121 exynos_plane->crtc_x, exynos_plane->crtc_y, 110 exynos_plane->crtc_x, exynos_plane->crtc_y,
122 exynos_plane->crtc_width, exynos_plane->crtc_height); 111 exynos_plane->crtc_w, exynos_plane->crtc_h);
123 112
124 plane->crtc = crtc; 113 plane->crtc = crtc;
125} 114}
@@ -145,15 +134,15 @@ static int exynos_plane_atomic_check(struct drm_plane *plane,
145 134
146 nr = exynos_drm_fb_get_buf_cnt(state->fb); 135 nr = exynos_drm_fb_get_buf_cnt(state->fb);
147 for (i = 0; i < nr; i++) { 136 for (i = 0; i < nr; i++) {
148 struct exynos_drm_gem_buf *buffer = 137 struct exynos_drm_gem_obj *obj =
149 exynos_drm_fb_buffer(state->fb, i); 138 exynos_drm_fb_gem_obj(state->fb, i);
150 139
151 if (!buffer) { 140 if (!obj) {
152 DRM_DEBUG_KMS("buffer is null\n"); 141 DRM_DEBUG_KMS("gem object is null\n");
153 return -EFAULT; 142 return -EFAULT;
154 } 143 }
155 144
156 exynos_plane->dma_addr[i] = buffer->dma_addr + 145 exynos_plane->dma_addr[i] = obj->dma_addr +
157 state->fb->offsets[i]; 146 state->fb->offsets[i];
158 147
159 DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n", 148 DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
@@ -179,8 +168,8 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
179 state->src_x >> 16, state->src_y >> 16, 168 state->src_x >> 16, state->src_y >> 16,
180 state->src_w >> 16, state->src_h >> 16); 169 state->src_w >> 16, state->src_h >> 16);
181 170
182 if (exynos_crtc->ops->win_commit) 171 if (exynos_crtc->ops->update_plane)
183 exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos); 172 exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
184} 173}
185 174
186static void exynos_plane_atomic_disable(struct drm_plane *plane, 175static void exynos_plane_atomic_disable(struct drm_plane *plane,
@@ -192,9 +181,9 @@ static void exynos_plane_atomic_disable(struct drm_plane *plane,
192 if (!old_state->crtc) 181 if (!old_state->crtc)
193 return; 182 return;
194 183
195 if (exynos_crtc->ops->win_disable) 184 if (exynos_crtc->ops->disable_plane)
196 exynos_crtc->ops->win_disable(exynos_crtc, 185 exynos_crtc->ops->disable_plane(exynos_crtc,
197 exynos_plane->zpos); 186 exynos_plane);
198} 187}
199 188
200static const struct drm_plane_helper_funcs plane_helper_funcs = { 189static const struct drm_plane_helper_funcs plane_helper_funcs = {
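
The plane rework changes the crtc callbacks from window-index based (win_commit/win_disable) to plane based (update_plane/disable_plane): drivers receive the exynos_drm_plane directly instead of looking it up from an index they then had to bounds-check. Sketch of the new callback shape (names are examples, not a real driver):

static void example_update_plane(struct exynos_drm_crtc *crtc,
				 struct exynos_drm_plane *plane)
{
	/* Everything needed to program scanout is on the plane itself. */
	DRM_DEBUG_KMS("addr = %pad, dst = %ux%u+%d+%d\n",
		      &plane->dma_addr[0], plane->crtc_w, plane->crtc_h,
		      plane->crtc_x, plane->crtc_y);
}

static const struct exynos_drm_crtc_ops example_crtc_ops = {
	.update_plane	= example_update_plane,
};
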
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 3413393d8a16..581af35861a6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -25,7 +25,6 @@
25#include "exynos_drm_drv.h" 25#include "exynos_drm_drv.h"
26#include "exynos_drm_crtc.h" 26#include "exynos_drm_crtc.h"
27#include "exynos_drm_plane.h" 27#include "exynos_drm_plane.h"
28#include "exynos_drm_encoder.h"
29#include "exynos_drm_vidi.h" 28#include "exynos_drm_vidi.h"
30 29
31/* vidi has totally three virtual windows. */ 30/* vidi has totally three virtual windows. */
@@ -35,11 +34,10 @@
35 connector) 34 connector)
36 35
37struct vidi_context { 36struct vidi_context {
38 struct exynos_drm_display display; 37 struct drm_encoder encoder;
39 struct platform_device *pdev; 38 struct platform_device *pdev;
40 struct drm_device *drm_dev; 39 struct drm_device *drm_dev;
41 struct exynos_drm_crtc *crtc; 40 struct exynos_drm_crtc *crtc;
42 struct drm_encoder *encoder;
43 struct drm_connector connector; 41 struct drm_connector connector;
44 struct exynos_drm_plane planes[WINDOWS_NR]; 42 struct exynos_drm_plane planes[WINDOWS_NR];
45 struct edid *raw_edid; 43 struct edid *raw_edid;
@@ -55,9 +53,9 @@ struct vidi_context {
55 int pipe; 53 int pipe;
56}; 54};
57 55
58static inline struct vidi_context *display_to_vidi(struct exynos_drm_display *d) 56static inline struct vidi_context *encoder_to_vidi(struct drm_encoder *e)
59{ 57{
60 return container_of(d, struct vidi_context, display); 58 return container_of(e, struct vidi_context, encoder);
61} 59}
62 60
63static const char fake_edid_info[] = { 61static const char fake_edid_info[] = {
@@ -100,7 +98,7 @@ static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
100 /* 98 /*
101 * in case of page flip request, vidi_finish_pageflip function 99 * in case of page flip request, vidi_finish_pageflip function
102 * will not be called because direct_vblank is true and then 100 * will not be called because direct_vblank is true and then
103 * that function will be called by crtc_ops->win_commit callback 101 * that function will be called by crtc_ops->update_plane callback
104 */ 102 */
105 schedule_work(&ctx->work); 103 schedule_work(&ctx->work);
106 104
@@ -118,19 +116,14 @@ static void vidi_disable_vblank(struct exynos_drm_crtc *crtc)
118 ctx->vblank_on = false; 116 ctx->vblank_on = false;
119} 117}
120 118
121static void vidi_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) 119static void vidi_update_plane(struct exynos_drm_crtc *crtc,
120 struct exynos_drm_plane *plane)
122{ 121{
123 struct vidi_context *ctx = crtc->ctx; 122 struct vidi_context *ctx = crtc->ctx;
124 struct exynos_drm_plane *plane;
125 123
126 if (ctx->suspended) 124 if (ctx->suspended)
127 return; 125 return;
128 126
129 if (win < 0 || win >= WINDOWS_NR)
130 return;
131
132 plane = &ctx->planes[win];
133
134 DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr); 127 DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr);
135 128
136 if (ctx->vblank_on) 129 if (ctx->vblank_on)
@@ -179,7 +172,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
179 .disable = vidi_disable, 172 .disable = vidi_disable,
180 .enable_vblank = vidi_enable_vblank, 173 .enable_vblank = vidi_enable_vblank,
181 .disable_vblank = vidi_disable_vblank, 174 .disable_vblank = vidi_disable_vblank,
182 .win_commit = vidi_win_commit, 175 .update_plane = vidi_update_plane,
183}; 176};
184 177
185static void vidi_fake_vblank_handler(struct work_struct *work) 178static void vidi_fake_vblank_handler(struct work_struct *work)
@@ -196,7 +189,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
196 mutex_lock(&ctx->lock); 189 mutex_lock(&ctx->lock);
197 190
198 if (ctx->direct_vblank) { 191 if (ctx->direct_vblank) {
199 drm_handle_vblank(ctx->drm_dev, ctx->pipe); 192 drm_crtc_handle_vblank(&ctx->crtc->base);
200 ctx->direct_vblank = false; 193 ctx->direct_vblank = false;
201 mutex_unlock(&ctx->lock); 194 mutex_unlock(&ctx->lock);
202 return; 195 return;
@@ -204,7 +197,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
204 197
205 mutex_unlock(&ctx->lock); 198 mutex_unlock(&ctx->lock);
206 199
207 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); 200 exynos_drm_crtc_finish_pageflip(ctx->crtc);
208} 201}
209 202
210static int vidi_show_connection(struct device *dev, 203static int vidi_show_connection(struct device *dev,
@@ -259,9 +252,7 @@ static DEVICE_ATTR(connection, 0644, vidi_show_connection,
259int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, 252int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
260 struct drm_file *file_priv) 253 struct drm_file *file_priv)
261{ 254{
262 struct vidi_context *ctx = NULL; 255 struct vidi_context *ctx = dev_get_drvdata(drm_dev->dev);
263 struct drm_encoder *encoder;
264 struct exynos_drm_display *display;
265 struct drm_exynos_vidi_connection *vidi = data; 256 struct drm_exynos_vidi_connection *vidi = data;
266 257
267 if (!vidi) { 258 if (!vidi) {
@@ -274,21 +265,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
274 return -EINVAL; 265 return -EINVAL;
275 } 266 }
276 267
277 list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list,
278 head) {
279 display = exynos_drm_get_display(encoder);
280
281 if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) {
282 ctx = display_to_vidi(display);
283 break;
284 }
285 }
286
287 if (!ctx) {
288 DRM_DEBUG_KMS("not found virtual device type encoder.\n");
289 return -EINVAL;
290 }
291
292 if (ctx->connected == vidi->connection) { 268 if (ctx->connected == vidi->connection) {
293 DRM_DEBUG_KMS("same connection request.\n"); 269 DRM_DEBUG_KMS("same connection request.\n");
294 return -EINVAL; 270 return -EINVAL;
@@ -381,7 +357,7 @@ static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
381{ 357{
382 struct vidi_context *ctx = ctx_from_connector(connector); 358 struct vidi_context *ctx = ctx_from_connector(connector);
383 359
384 return ctx->encoder; 360 return &ctx->encoder;
385} 361}
386 362
387static struct drm_connector_helper_funcs vidi_connector_helper_funcs = { 363static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
@@ -389,14 +365,12 @@ static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
389 .best_encoder = vidi_best_encoder, 365 .best_encoder = vidi_best_encoder,
390}; 366};
391 367
392static int vidi_create_connector(struct exynos_drm_display *display, 368static int vidi_create_connector(struct drm_encoder *encoder)
393 struct drm_encoder *encoder)
394{ 369{
395 struct vidi_context *ctx = display_to_vidi(display); 370 struct vidi_context *ctx = encoder_to_vidi(encoder);
396 struct drm_connector *connector = &ctx->connector; 371 struct drm_connector *connector = &ctx->connector;
397 int ret; 372 int ret;
398 373
399 ctx->encoder = encoder;
400 connector->polled = DRM_CONNECTOR_POLL_HPD; 374 connector->polled = DRM_CONNECTOR_POLL_HPD;
401 375
402 ret = drm_connector_init(ctx->drm_dev, connector, 376 ret = drm_connector_init(ctx->drm_dev, connector,
@@ -413,19 +387,47 @@ static int vidi_create_connector(struct exynos_drm_display *display,
413 return 0; 387 return 0;
414} 388}
415 389
390static bool exynos_vidi_mode_fixup(struct drm_encoder *encoder,
391 const struct drm_display_mode *mode,
392 struct drm_display_mode *adjusted_mode)
393{
394 return true;
395}
396
397static void exynos_vidi_mode_set(struct drm_encoder *encoder,
398 struct drm_display_mode *mode,
399 struct drm_display_mode *adjusted_mode)
400{
401}
416 402
417static struct exynos_drm_display_ops vidi_display_ops = { 403static void exynos_vidi_enable(struct drm_encoder *encoder)
418 .create_connector = vidi_create_connector, 404{
405}
406
407static void exynos_vidi_disable(struct drm_encoder *encoder)
408{
409}
410
411static struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = {
412 .mode_fixup = exynos_vidi_mode_fixup,
413 .mode_set = exynos_vidi_mode_set,
414 .enable = exynos_vidi_enable,
415 .disable = exynos_vidi_disable,
416};
417
418static struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
419 .destroy = drm_encoder_cleanup,
419}; 420};
420 421
421static int vidi_bind(struct device *dev, struct device *master, void *data) 422static int vidi_bind(struct device *dev, struct device *master, void *data)
422{ 423{
423 struct vidi_context *ctx = dev_get_drvdata(dev); 424 struct vidi_context *ctx = dev_get_drvdata(dev);
424 struct drm_device *drm_dev = data; 425 struct drm_device *drm_dev = data;
426 struct drm_encoder *encoder = &ctx->encoder;
425 struct exynos_drm_plane *exynos_plane; 427 struct exynos_drm_plane *exynos_plane;
426 enum drm_plane_type type; 428 enum drm_plane_type type;
427 unsigned int zpos; 429 unsigned int zpos;
428 int ret; 430 int pipe, ret;
429 431
430 vidi_ctx_initialize(ctx, drm_dev); 432 vidi_ctx_initialize(ctx, drm_dev);
431 433
@@ -447,9 +449,24 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
447 return PTR_ERR(ctx->crtc); 449 return PTR_ERR(ctx->crtc);
448 } 450 }
449 451
450 ret = exynos_drm_create_enc_conn(drm_dev, &ctx->display); 452 pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
453 EXYNOS_DISPLAY_TYPE_VIDI);
454 if (pipe < 0)
455 return pipe;
456
457 encoder->possible_crtcs = 1 << pipe;
458
459 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
460
461 drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
462 DRM_MODE_ENCODER_TMDS);
463
464 drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
465
466 ret = vidi_create_connector(encoder);
451 if (ret) { 467 if (ret) {
452 ctx->crtc->base.funcs->destroy(&ctx->crtc->base); 468 DRM_ERROR("failed to create connector ret = %d\n", ret);
469 drm_encoder_cleanup(encoder);
453 return ret; 470 return ret;
454 } 471 }
455 472
@@ -475,8 +492,6 @@ static int vidi_probe(struct platform_device *pdev)
475 if (!ctx) 492 if (!ctx)
476 return -ENOMEM; 493 return -ENOMEM;
477 494
478 ctx->display.type = EXYNOS_DISPLAY_TYPE_VIDI;
479 ctx->display.ops = &vidi_display_ops;
480 ctx->default_win = 0; 495 ctx->default_win = 0;
481 ctx->pdev = pdev; 496 ctx->pdev = pdev;
482 497
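
vidi (and hdmi below) move from a heap-allocated struct drm_encoder pointer behind an exynos_drm_display wrapper to an encoder embedded in the context, recovered via container_of(). Why that works, in one sketch (the classic definition, minus the kernel's type check):

#include <stddef.h>

#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* With 'struct vidi_context { struct drm_encoder encoder; ... };',
 * a pointer to .encoder sits at a fixed offset inside the context,
 * so subtracting offsetof(struct vidi_context, encoder) recovers
 * the containing object, exactly as encoder_to_vidi() does. */
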
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 99e286489031..932f7fa240f8 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -22,7 +22,6 @@
22#include "regs-hdmi.h" 22#include "regs-hdmi.h"
23 23
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/spinlock.h>
26#include <linux/wait.h> 25#include <linux/wait.h>
27#include <linux/i2c.h> 26#include <linux/i2c.h>
28#include <linux/platform_device.h> 27#include <linux/platform_device.h>
@@ -33,8 +32,8 @@
33#include <linux/clk.h> 32#include <linux/clk.h>
34#include <linux/regulator/consumer.h> 33#include <linux/regulator/consumer.h>
35#include <linux/io.h> 34#include <linux/io.h>
36#include <linux/of.h>
37#include <linux/of_address.h> 35#include <linux/of_address.h>
36#include <linux/of_device.h>
38#include <linux/of_gpio.h> 37#include <linux/of_gpio.h>
39#include <linux/hdmi.h> 38#include <linux/hdmi.h>
40#include <linux/component.h> 39#include <linux/component.h>
@@ -48,7 +47,6 @@
48#include "exynos_mixer.h" 47#include "exynos_mixer.h"
49 48
50#include <linux/gpio.h> 49#include <linux/gpio.h>
51#include <media/s5p_hdmi.h>
52 50
53#define ctx_from_connector(c) container_of(c, struct hdmi_context, connector) 51#define ctx_from_connector(c) container_of(c, struct hdmi_context, connector)
54 52
@@ -88,109 +86,14 @@ struct hdmi_resources {
88 int regul_count; 86 int regul_count;
89}; 87};
90 88
91struct hdmi_tg_regs {
92 u8 cmd[1];
93 u8 h_fsz[2];
94 u8 hact_st[2];
95 u8 hact_sz[2];
96 u8 v_fsz[2];
97 u8 vsync[2];
98 u8 vsync2[2];
99 u8 vact_st[2];
100 u8 vact_sz[2];
101 u8 field_chg[2];
102 u8 vact_st2[2];
103 u8 vact_st3[2];
104 u8 vact_st4[2];
105 u8 vsync_top_hdmi[2];
106 u8 vsync_bot_hdmi[2];
107 u8 field_top_hdmi[2];
108 u8 field_bot_hdmi[2];
109 u8 tg_3d[1];
110};
111
112struct hdmi_v13_core_regs {
113 u8 h_blank[2];
114 u8 v_blank[3];
115 u8 h_v_line[3];
116 u8 vsync_pol[1];
117 u8 int_pro_mode[1];
118 u8 v_blank_f[3];
119 u8 h_sync_gen[3];
120 u8 v_sync_gen1[3];
121 u8 v_sync_gen2[3];
122 u8 v_sync_gen3[3];
123};
124
125struct hdmi_v14_core_regs {
126 u8 h_blank[2];
127 u8 v2_blank[2];
128 u8 v1_blank[2];
129 u8 v_line[2];
130 u8 h_line[2];
131 u8 hsync_pol[1];
132 u8 vsync_pol[1];
133 u8 int_pro_mode[1];
134 u8 v_blank_f0[2];
135 u8 v_blank_f1[2];
136 u8 h_sync_start[2];
137 u8 h_sync_end[2];
138 u8 v_sync_line_bef_2[2];
139 u8 v_sync_line_bef_1[2];
140 u8 v_sync_line_aft_2[2];
141 u8 v_sync_line_aft_1[2];
142 u8 v_sync_line_aft_pxl_2[2];
143 u8 v_sync_line_aft_pxl_1[2];
144 u8 v_blank_f2[2]; /* for 3D mode */
145 u8 v_blank_f3[2]; /* for 3D mode */
146 u8 v_blank_f4[2]; /* for 3D mode */
147 u8 v_blank_f5[2]; /* for 3D mode */
148 u8 v_sync_line_aft_3[2];
149 u8 v_sync_line_aft_4[2];
150 u8 v_sync_line_aft_5[2];
151 u8 v_sync_line_aft_6[2];
152 u8 v_sync_line_aft_pxl_3[2];
153 u8 v_sync_line_aft_pxl_4[2];
154 u8 v_sync_line_aft_pxl_5[2];
155 u8 v_sync_line_aft_pxl_6[2];
156 u8 vact_space_1[2];
157 u8 vact_space_2[2];
158 u8 vact_space_3[2];
159 u8 vact_space_4[2];
160 u8 vact_space_5[2];
161 u8 vact_space_6[2];
162};
163
164struct hdmi_v13_conf {
165 struct hdmi_v13_core_regs core;
166 struct hdmi_tg_regs tg;
167};
168
169struct hdmi_v14_conf {
170 struct hdmi_v14_core_regs core;
171 struct hdmi_tg_regs tg;
172};
173
174struct hdmi_conf_regs {
175 int pixel_clock;
176 int cea_video_id;
177 enum hdmi_picture_aspect aspect_ratio;
178 union {
179 struct hdmi_v13_conf v13_conf;
180 struct hdmi_v14_conf v14_conf;
181 } conf;
182};
183
184struct hdmi_context { 89struct hdmi_context {
185 struct exynos_drm_display display; 90 struct drm_encoder encoder;
186 struct device *dev; 91 struct device *dev;
187 struct drm_device *drm_dev; 92 struct drm_device *drm_dev;
188 struct drm_connector connector; 93 struct drm_connector connector;
189 struct drm_encoder *encoder;
190 bool hpd; 94 bool hpd;
191 bool powered; 95 bool powered;
192 bool dvi_mode; 96 bool dvi_mode;
193 struct mutex hdmi_mutex;
194 97
195 void __iomem *regs; 98 void __iomem *regs;
196 int irq; 99 int irq;
@@ -201,22 +104,20 @@ struct hdmi_context {
201 104
202 /* current hdmiphy conf regs */ 105 /* current hdmiphy conf regs */
203 struct drm_display_mode current_mode; 106 struct drm_display_mode current_mode;
204 struct hdmi_conf_regs mode_conf; 107 u8 cea_video_id;
205 108
206 struct hdmi_resources res; 109 struct hdmi_resources res;
110 const struct hdmi_driver_data *drv_data;
207 111
208 int hpd_gpio; 112 int hpd_gpio;
209 void __iomem *regs_hdmiphy; 113 void __iomem *regs_hdmiphy;
210 const struct hdmiphy_config *phy_confs;
211 unsigned int phy_conf_count;
212 114
213 struct regmap *pmureg; 115 struct regmap *pmureg;
214 enum hdmi_type type;
215}; 116};
216 117
217static inline struct hdmi_context *display_to_hdmi(struct exynos_drm_display *d) 118static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
218{ 119{
219 return container_of(d, struct hdmi_context, display); 120 return container_of(e, struct hdmi_context, encoder);
220} 121}
221 122
222struct hdmiphy_config { 123struct hdmiphy_config {
@@ -624,6 +525,16 @@ static inline void hdmi_reg_writeb(struct hdmi_context *hdata,
624 writeb(value, hdata->regs + reg_id); 525 writeb(value, hdata->regs + reg_id);
625} 526}
626 527
528static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 reg_id,
529 int bytes, u32 val)
530{
531 while (--bytes >= 0) {
532 writeb(val & 0xff, hdata->regs + reg_id);
533 val >>= 8;
534 reg_id += 4;
535 }
536}
537
627static inline void hdmi_reg_writemask(struct hdmi_context *hdata, 538static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
628 u32 reg_id, u32 value, u32 mask) 539 u32 reg_id, u32 value, u32 mask)
629{ 540{
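
hdmi_reg_writev() is the enabler for the large conversion below: the per-mode register snapshots (hdmi_tg_regs, hdmi_v13/v14_core_regs) disappear because multi-byte values are now computed straight from the drm_display_mode and written LSB-first across registers spaced 4 bytes apart. One call replaces a run of per-byte writes, e.g. (expansion shown for illustration):

	hdmi_reg_writev(hdata, HDMI_V13_H_V_LINE_0, 3, val);

	/* ...is equivalent to: */
	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, val & 0xff);
	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, (val >> 8) & 0xff);
	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, (val >> 16) & 0xff);
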
@@ -930,7 +841,7 @@ static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix)
930 841
931static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix) 842static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
932{ 843{
933 if (hdata->type == HDMI_TYPE13) 844 if (hdata->drv_data->type == HDMI_TYPE13)
934 hdmi_v13_regs_dump(hdata, prefix); 845 hdmi_v13_regs_dump(hdata, prefix);
935 else 846 else
936 hdmi_v14_regs_dump(hdata, prefix); 847 hdmi_v14_regs_dump(hdata, prefix);
@@ -957,7 +868,7 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
957 u32 hdr_sum; 868 u32 hdr_sum;
958 u8 chksum; 869 u8 chksum;
959 u32 mod; 870 u32 mod;
960 u32 vic; 871 u8 ar;
961 872
962 mod = hdmi_reg_read(hdata, HDMI_MODE_SEL); 873 mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
963 if (hdata->dvi_mode) { 874 if (hdata->dvi_mode) {
@@ -988,27 +899,22 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
988 * Set the aspect ratio as per the mode, mentioned in 899 * Set the aspect ratio as per the mode, mentioned in
989 * Table 9 AVI InfoFrame Data Byte 2 of CEA-861-D Standard 900 * Table 9 AVI InfoFrame Data Byte 2 of CEA-861-D Standard
990 */ 901 */
991 switch (hdata->mode_conf.aspect_ratio) { 902 ar = hdata->current_mode.picture_aspect_ratio;
903 switch (ar) {
992 case HDMI_PICTURE_ASPECT_4_3: 904 case HDMI_PICTURE_ASPECT_4_3:
993 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), 905 ar |= AVI_4_3_CENTER_RATIO;
994 hdata->mode_conf.aspect_ratio |
995 AVI_4_3_CENTER_RATIO);
996 break; 906 break;
997 case HDMI_PICTURE_ASPECT_16_9: 907 case HDMI_PICTURE_ASPECT_16_9:
998 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), 908 ar |= AVI_16_9_CENTER_RATIO;
999 hdata->mode_conf.aspect_ratio |
1000 AVI_16_9_CENTER_RATIO);
1001 break; 909 break;
1002 case HDMI_PICTURE_ASPECT_NONE: 910 case HDMI_PICTURE_ASPECT_NONE:
1003 default: 911 default:
1004 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), 912 ar |= AVI_SAME_AS_PIC_ASPECT_RATIO;
1005 hdata->mode_conf.aspect_ratio |
1006 AVI_SAME_AS_PIC_ASPECT_RATIO);
1007 break; 913 break;
1008 } 914 }
915 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), ar);
1009 916
1010 vic = hdata->mode_conf.cea_video_id; 917 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), hdata->cea_video_id);
1011 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
1012 918
1013 chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1), 919 chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
1014 infoframe->any.length, hdr_sum); 920 infoframe->any.length, hdr_sum);
@@ -1038,10 +944,10 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
1038{ 944{
1039 struct hdmi_context *hdata = ctx_from_connector(connector); 945 struct hdmi_context *hdata = ctx_from_connector(connector);
1040 946
1041 hdata->hpd = gpio_get_value(hdata->hpd_gpio); 947 if (gpio_get_value(hdata->hpd_gpio))
948 return connector_status_connected;
1042 949
1043 return hdata->hpd ? connector_status_connected : 950 return connector_status_disconnected;
1044 connector_status_disconnected;
1045} 951}
1046 952
1047static void hdmi_connector_destroy(struct drm_connector *connector) 953static void hdmi_connector_destroy(struct drm_connector *connector)
@@ -1064,6 +970,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
1064{ 970{
1065 struct hdmi_context *hdata = ctx_from_connector(connector); 971 struct hdmi_context *hdata = ctx_from_connector(connector);
1066 struct edid *edid; 972 struct edid *edid;
973 int ret;
1067 974
1068 if (!hdata->ddc_adpt) 975 if (!hdata->ddc_adpt)
1069 return -ENODEV; 976 return -ENODEV;
@@ -1079,15 +986,19 @@ static int hdmi_get_modes(struct drm_connector *connector)
1079 986
1080 drm_mode_connector_update_edid_property(connector, edid); 987 drm_mode_connector_update_edid_property(connector, edid);
1081 988
1082 return drm_add_edid_modes(connector, edid); 989 ret = drm_add_edid_modes(connector, edid);
990
991 kfree(edid);
992
993 return ret;
1083} 994}
1084 995
1085static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) 996static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
1086{ 997{
1087 int i; 998 int i;
1088 999
1089 for (i = 0; i < hdata->phy_conf_count; i++) 1000 for (i = 0; i < hdata->drv_data->phy_conf_count; i++)
1090 if (hdata->phy_confs[i].pixel_clock == pixel_clock) 1001 if (hdata->drv_data->phy_confs[i].pixel_clock == pixel_clock)
1091 return i; 1002 return i;
1092 1003
1093 DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock); 1004 DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
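
Two independent fixes ride along in this hunk: the phy config table moves behind drv_data, and hdmi_get_modes() stops leaking the EDID blob, which drm_get_edid() returns in a kmalloc'ed buffer the caller owns. The leak-free pattern, for reference:

	edid = drm_get_edid(connector, hdata->ddc_adpt);
	if (!edid)
		return -ENODEV;

	drm_mode_connector_update_edid_property(connector, edid);

	ret = drm_add_edid_modes(connector, edid);
	kfree(edid);	/* caller owns the buffer returned by drm_get_edid() */

	return ret;
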
@@ -1120,7 +1031,7 @@ static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
1120{ 1031{
1121 struct hdmi_context *hdata = ctx_from_connector(connector); 1032 struct hdmi_context *hdata = ctx_from_connector(connector);
1122 1033
1123 return hdata->encoder; 1034 return &hdata->encoder;
1124} 1035}
1125 1036
1126static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { 1037static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
@@ -1129,14 +1040,12 @@ static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
1129 .best_encoder = hdmi_best_encoder, 1040 .best_encoder = hdmi_best_encoder,
1130}; 1041};
1131 1042
1132static int hdmi_create_connector(struct exynos_drm_display *display, 1043static int hdmi_create_connector(struct drm_encoder *encoder)
1133 struct drm_encoder *encoder)
1134{ 1044{
1135 struct hdmi_context *hdata = display_to_hdmi(display); 1045 struct hdmi_context *hdata = encoder_to_hdmi(encoder);
1136 struct drm_connector *connector = &hdata->connector; 1046 struct drm_connector *connector = &hdata->connector;
1137 int ret; 1047 int ret;
1138 1048
1139 hdata->encoder = encoder;
1140 connector->interlace_allowed = true; 1049 connector->interlace_allowed = true;
1141 connector->polled = DRM_CONNECTOR_POLL_HPD; 1050 connector->polled = DRM_CONNECTOR_POLL_HPD;
1142 1051
@@ -1154,23 +1063,30 @@ static int hdmi_create_connector(struct exynos_drm_display *display,
1154 return 0; 1063 return 0;
1155} 1064}
1156 1065
1157static void hdmi_mode_fixup(struct exynos_drm_display *display, 1066static bool hdmi_mode_fixup(struct drm_encoder *encoder,
1158 struct drm_connector *connector, 1067 const struct drm_display_mode *mode,
1159 const struct drm_display_mode *mode, 1068 struct drm_display_mode *adjusted_mode)
1160 struct drm_display_mode *adjusted_mode)
1161{ 1069{
1070 struct drm_device *dev = encoder->dev;
1071 struct drm_connector *connector;
1162 struct drm_display_mode *m; 1072 struct drm_display_mode *m;
1163 int mode_ok; 1073 int mode_ok;
1164 1074
1165 DRM_DEBUG_KMS("%s\n", __FILE__);
1166
1167 drm_mode_set_crtcinfo(adjusted_mode, 0); 1075 drm_mode_set_crtcinfo(adjusted_mode, 0);
1168 1076
1077 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1078 if (connector->encoder == encoder)
1079 break;
1080 }
1081
1082 if (connector->encoder != encoder)
1083 return true;
1084
1169 mode_ok = hdmi_mode_valid(connector, adjusted_mode); 1085 mode_ok = hdmi_mode_valid(connector, adjusted_mode);
1170 1086
1171 /* just return if user desired mode exists. */ 1087 /* just return if user desired mode exists. */
1172 if (mode_ok == MODE_OK) 1088 if (mode_ok == MODE_OK)
1173 return; 1089 return true;
1174 1090
1175 /* 1091 /*
1176 * otherwise, find the most suitable mode among modes and change it 1092 * otherwise, find the most suitable mode among modes and change it
@@ -1190,6 +1106,8 @@ static void hdmi_mode_fixup(struct exynos_drm_display *display,
1190 break; 1106 break;
1191 } 1107 }
1192 } 1108 }
1109
1110 return true;
1193} 1111}
1194 1112
1195static void hdmi_set_acr(u32 freq, u8 *acr) 1113static void hdmi_set_acr(u32 freq, u8 *acr)
@@ -1252,7 +1170,7 @@ static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr)
1252 hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]); 1170 hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]);
1253 hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]); 1171 hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]);
1254 1172
1255 if (hdata->type == HDMI_TYPE13) 1173 if (hdata->drv_data->type == HDMI_TYPE13)
1256 hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4); 1174 hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4);
1257 else 1175 else
1258 hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4); 1176 hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
@@ -1386,7 +1304,7 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
1386 HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS); 1304 HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS);
1387 } 1305 }
1388 1306
1389 if (hdata->type == HDMI_TYPE13) { 1307 if (hdata->drv_data->type == HDMI_TYPE13) {
1390 /* choose bluescreen (fecal) color */ 1308 /* choose bluescreen (fecal) color */
1391 hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12); 1309 hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
1392 hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34); 1310 hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34);
@@ -1419,66 +1337,94 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
1419 1337
1420static void hdmi_v13_mode_apply(struct hdmi_context *hdata) 1338static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
1421{ 1339{
1422 const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg; 1340 struct drm_display_mode *m = &hdata->current_mode;
1423 const struct hdmi_v13_core_regs *core = 1341 unsigned int val;
1424 &hdata->mode_conf.conf.v13_conf.core;
1425 int tries; 1342 int tries;
1426 1343
1427 /* setting core registers */ 1344 hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
1428 hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]); 1345 hdmi_reg_writev(hdata, HDMI_V13_H_V_LINE_0, 3,
1429 hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]); 1346 (m->htotal << 12) | m->vtotal);
1430 hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]); 1347
1431 hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]); 1348 val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
1432 hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]); 1349 hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1, val);
1433 hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]); 1350
1434 hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]); 1351 val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
1435 hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]); 1352 hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1, val);
1436 hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]); 1353
1437 hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]); 1354 val = (m->hsync_start - m->hdisplay - 2);
1438 hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]); 1355 val |= ((m->hsync_end - m->hdisplay - 2) << 10);
1439 hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]); 1356 val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20;
1440 hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]); 1357 hdmi_reg_writev(hdata, HDMI_V13_H_SYNC_GEN_0, 3, val);
1441 hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]); 1358
1442 hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]); 1359 /*
1443 hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]); 1360 * Quirk requirement for exynos HDMI IP design,
1444 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]); 1361 * 2 pixels less than the actual calculation for hsync_start
1445 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]); 1362 * and end.
1446 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]); 1363 */
1447 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]); 1364
1448 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]); 1365 /* Following values & calculations differ for different type of modes */
1449 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]); 1366 if (m->flags & DRM_MODE_FLAG_INTERLACE) {
1450 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]); 1367 /* Interlaced Mode */
1451 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]); 1368 val = ((m->vsync_end - m->vdisplay) / 2);
1452 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]); 1369 val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
1370 hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
1371
1372 val = m->vtotal / 2;
1373 val |= ((m->vtotal - m->vdisplay) / 2) << 11;
1374 hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
1375
1376 val = (m->vtotal +
1377 ((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
1378 val |= m->vtotal << 11;
1379 hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, val);
1380
1381 val = ((m->vtotal / 2) + 7);
1382 val |= ((m->vtotal / 2) + 2) << 12;
1383 hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, val);
1384
1385 val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
1386 val |= ((m->htotal / 2) +
1387 (m->hsync_start - m->hdisplay)) << 12;
1388 hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, val);
1389
1390 hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
1391 (m->vtotal - m->vdisplay) / 2);
1392 hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2);
1393
1394 hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x249);
1395 } else {
1396 /* Progressive Mode */
1397
1398 val = m->vtotal;
1399 val |= (m->vtotal - m->vdisplay) << 11;
1400 hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
1401
1402 hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, 0);
1403
1404 val = (m->vsync_end - m->vdisplay);
1405 val |= ((m->vsync_start - m->vdisplay) << 12);
1406 hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
1407
1408 hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, 0x1001);
1409 hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, 0x1001);
1410 hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
1411 m->vtotal - m->vdisplay);
1412 hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
1413 hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
1414 }
1415
1453 /* Timing generator registers */ 1416 /* Timing generator registers */
1454 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]); 1417 hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
1455 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]); 1418 hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
1456 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]); 1419 hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
1457 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]); 1420 hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
1458 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]); 1421 hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
1459 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]); 1422 hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
1460 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]); 1423 hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
1461 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]); 1424 hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
1462 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]); 1425 hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
1463 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]); 1426 hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
1464 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]); 1427 hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
1465 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
1466 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
1467 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
1468 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
1469 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
1470 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
1471 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
1472 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
1473 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
1474 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
1475 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
1476 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
1477 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
1478 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
1479 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
1480 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
1481 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
1482 1428
1483 /* waiting for HDMIPHY's PLL to get to steady state */ 1429 /* waiting for HDMIPHY's PLL to get to steady state */
1484 for (tries = 100; tries; --tries) { 1430 for (tries = 100; tries; --tries) {
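
To make the v13 arithmetic concrete, here is HDMI_V13_H_SYNC_GEN for a standard CEA 1280x720@60 mode (hdisplay 1280, hsync_start 1390, hsync_end 1430, htotal 1650, positive sync), with the "2 pixels less" quirk applied:

	/* val  = 1390 - 1280 - 2         = 108         (bits  9:0)  */
	/* val |= (1430 - 1280 - 2) << 10 = 148 << 10   (bits 19:10) */
	/* val |= 0 << 20  -- positive hsync, so NHSYNC bit is clear */
	/* => val = 108 | (148 << 10) = 0x2506c, written as 3 bytes  */
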
@@ -1503,144 +1449,119 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
1503 1449
1504static void hdmi_v14_mode_apply(struct hdmi_context *hdata) 1450static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
1505{ 1451{
1506 const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg; 1452 struct drm_display_mode *m = &hdata->current_mode;
1507 const struct hdmi_v14_core_regs *core =
1508 &hdata->mode_conf.conf.v14_conf.core;
1509 int tries; 1453 int tries;
1510 1454
1511 /* setting core registers */ 1455 hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
1512 hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]); 1456 hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal);
1513 hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]); 1457 hdmi_reg_writev(hdata, HDMI_H_LINE_0, 2, m->htotal);
1514 hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]); 1458 hdmi_reg_writev(hdata, HDMI_HSYNC_POL, 1,
1515 hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]); 1459 (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
1516 hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, core->v1_blank[0]); 1460 hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1,
1517 hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]); 1461 (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
1518 hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]); 1462 hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1,
1519 hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]); 1463 (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
1520 hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]); 1464
1521 hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]); 1465 /*
1522 hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]); 1466 * Quirk requirement for exynos 5 HDMI IP design,
1523 hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]); 1467 * 2 pixels less than the actual calculation for hsync_start
1524 hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]); 1468 * and end.
1525 hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]); 1469 */
1526 hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]); 1470
1527 hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, core->v_blank_f1[0]); 1471 /* Following values & calculations differ for different type of modes */
1528 hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]); 1472 if (m->flags & DRM_MODE_FLAG_INTERLACE) {
1529 hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]); 1473 /* Interlaced Mode */
1530 hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]); 1474 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
1531 hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]); 1475 (m->vsync_end - m->vdisplay) / 2);
1532 hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]); 1476 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
1533 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 1477 (m->vsync_start - m->vdisplay) / 2);
1534 core->v_sync_line_bef_2[0]); 1478 hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal / 2);
1535 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1, 1479 hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2,
1536 core->v_sync_line_bef_2[1]); 1480 (m->vtotal - m->vdisplay) / 2);
1537 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 1481 hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2,
1538 core->v_sync_line_bef_1[0]); 1482 m->vtotal - m->vdisplay / 2);
1539 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1, 1483 hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, m->vtotal);
1540 core->v_sync_line_bef_1[1]); 1484 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2,
1541 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 1485 (m->vtotal / 2) + 7);
1542 core->v_sync_line_aft_2[0]); 1486 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2,
1543 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1, 1487 (m->vtotal / 2) + 2);
1544 core->v_sync_line_aft_2[1]); 1488 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2,
1545 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 1489 (m->htotal / 2) + (m->hsync_start - m->hdisplay));
1546 core->v_sync_line_aft_1[0]); 1490 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2,
1547 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1, 1491 (m->htotal / 2) + (m->hsync_start - m->hdisplay));
1548 core->v_sync_line_aft_1[1]); 1492 hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
1549 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 1493 (m->vtotal - m->vdisplay) / 2);
1550 core->v_sync_line_aft_pxl_2[0]); 1494 hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2);
1551 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1, 1495 hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2,
1552 core->v_sync_line_aft_pxl_2[1]); 1496 m->vtotal - m->vdisplay / 2);
1553 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 1497 hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2,
1554 core->v_sync_line_aft_pxl_1[0]); 1498 (m->vtotal / 2) + 1);
1555 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1, 1499 hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2,
1556 core->v_sync_line_aft_pxl_1[1]); 1500 (m->vtotal / 2) + 1);
1557 hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]); 1501 hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2,
1558 hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]); 1502 (m->vtotal / 2) + 1);
1559 hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]); 1503 hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x0);
1560 hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]); 1504 hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x0);
1561 hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]); 1505 } else {
1562 hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]); 1506 /* Progressive Mode */
1563 hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]); 1507 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
1564 hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]); 1508 m->vsync_end - m->vdisplay);
1565 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0, 1509 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
1566 core->v_sync_line_aft_3[0]); 1510 m->vsync_start - m->vdisplay);
1567 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1, 1511 hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal);
1568 core->v_sync_line_aft_3[1]); 1512 hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2,
1569 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0, 1513 m->vtotal - m->vdisplay);
1570 core->v_sync_line_aft_4[0]); 1514 hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2, 0xffff);
1571 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1, 1515 hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, 0xffff);
1572 core->v_sync_line_aft_4[1]); 1516 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2, 0xffff);
1573 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0, 1517 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2, 0xffff);
1574 core->v_sync_line_aft_5[0]); 1518 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2, 0xffff);
1575 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1, 1519 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2, 0xffff);
1576 core->v_sync_line_aft_5[1]); 1520 hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
1577 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0, 1521 m->vtotal - m->vdisplay);
1578 core->v_sync_line_aft_6[0]); 1522 hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
1579 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_1, 1523 hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
1580 core->v_sync_line_aft_6[1]); 1524 hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x47b);
1581 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0, 1525 hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x6ae);
1582 core->v_sync_line_aft_pxl_3[0]); 1526 hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
1583 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1, 1527 hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
1584 core->v_sync_line_aft_pxl_3[1]); 1528 hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
1585 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0, 1529 }
1586 core->v_sync_line_aft_pxl_4[0]); 1530
1587 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1, 1531 /* Following values & calculations are same irrespective of mode type */
1588 core->v_sync_line_aft_pxl_4[1]); 1532 hdmi_reg_writev(hdata, HDMI_H_SYNC_START_0, 2,
1589 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, 1533 m->hsync_start - m->hdisplay - 2);
1590 core->v_sync_line_aft_pxl_5[0]); 1534 hdmi_reg_writev(hdata, HDMI_H_SYNC_END_0, 2,
1591 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1, 1535 m->hsync_end - m->hdisplay - 2);
1592 core->v_sync_line_aft_pxl_5[1]); 1536 hdmi_reg_writev(hdata, HDMI_VACT_SPACE_1_0, 2, 0xffff);
1593 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, 1537 hdmi_reg_writev(hdata, HDMI_VACT_SPACE_2_0, 2, 0xffff);
1594 core->v_sync_line_aft_pxl_6[0]); 1538 hdmi_reg_writev(hdata, HDMI_VACT_SPACE_3_0, 2, 0xffff);
1595 hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1, 1539 hdmi_reg_writev(hdata, HDMI_VACT_SPACE_4_0, 2, 0xffff);
1596 core->v_sync_line_aft_pxl_6[1]); 1540 hdmi_reg_writev(hdata, HDMI_VACT_SPACE_5_0, 2, 0xffff);
1597 hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]); 1541 hdmi_reg_writev(hdata, HDMI_VACT_SPACE_6_0, 2, 0xffff);
1598 hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]); 1542 hdmi_reg_writev(hdata, HDMI_V_BLANK_F2_0, 2, 0xffff);
1599 hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]); 1543 hdmi_reg_writev(hdata, HDMI_V_BLANK_F3_0, 2, 0xffff);
1600 hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]); 1544 hdmi_reg_writev(hdata, HDMI_V_BLANK_F4_0, 2, 0xffff);
1601 hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]); 1545 hdmi_reg_writev(hdata, HDMI_V_BLANK_F5_0, 2, 0xffff);
1602 hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]); 1546 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_3_0, 2, 0xffff);
1603 hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]); 1547 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_4_0, 2, 0xffff);
1604 hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]); 1548 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_5_0, 2, 0xffff);
1605 hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]); 1549 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_6_0, 2, 0xffff);
1606 hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]); 1550 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0, 2, 0xffff);
1607 hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]); 1551 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0, 2, 0xffff);
1608 hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]); 1552 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, 2, 0xffff);
1553 hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, 2, 0xffff);
1609 1554
1610 /* Timing generator registers */ 1555 /* Timing generator registers */
1611 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]); 1556 hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
1612 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]); 1557 hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
1613 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]); 1558 hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
1614 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]); 1559 hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
1615 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]); 1560 hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
1616 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]); 1561 hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
1617 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]); 1562 hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
1618 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]); 1563 hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
1619 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]); 1564 hdmi_reg_writev(hdata, HDMI_TG_3D, 1, 0x0);
1620 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
1621 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
1622 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
1623 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
1624 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
1625 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
1626 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
1627 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
1628 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
1629 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
1630 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
1631 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]);
1632 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]);
1633 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]);
1634 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]);
1635 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
1636 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
1637 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
1638 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
1639 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
1640 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
1641 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
1642 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
1643 hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]);
1644 1565
1645 /* waiting for HDMIPHY's PLL to get to steady state */ 1566 /* waiting for HDMIPHY's PLL to get to steady state */
1646 for (tries = 100; tries; --tries) { 1567 for (tries = 100; tries; --tries) {
@@ -1665,7 +1586,7 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
1665 1586
1666static void hdmi_mode_apply(struct hdmi_context *hdata) 1587static void hdmi_mode_apply(struct hdmi_context *hdata)
1667{ 1588{
1668 if (hdata->type == HDMI_TYPE13) 1589 if (hdata->drv_data->type == HDMI_TYPE13)
1669 hdmi_v13_mode_apply(hdata); 1590 hdmi_v13_mode_apply(hdata);
1670 else 1591 else
1671 hdmi_v14_mode_apply(hdata); 1592 hdmi_v14_mode_apply(hdata);
@@ -1683,7 +1604,7 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
1683 hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, 1604 hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
1684 HDMI_PHY_ENABLE_MODE_SET); 1605 HDMI_PHY_ENABLE_MODE_SET);
1685 1606
1686 if (hdata->type == HDMI_TYPE13) 1607 if (hdata->drv_data->type == HDMI_TYPE13)
1687 reg = HDMI_V13_PHY_RSTOUT; 1608 reg = HDMI_V13_PHY_RSTOUT;
1688 else 1609 else
1689 reg = HDMI_PHY_RSTOUT; 1610 reg = HDMI_PHY_RSTOUT;
@@ -1697,7 +1618,7 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
1697 1618
1698static void hdmiphy_poweron(struct hdmi_context *hdata) 1619static void hdmiphy_poweron(struct hdmi_context *hdata)
1699{ 1620{
1700 if (hdata->type != HDMI_TYPE14) 1621 if (hdata->drv_data->type != HDMI_TYPE14)
1701 return; 1622 return;
1702 1623
1703 DRM_DEBUG_KMS("\n"); 1624 DRM_DEBUG_KMS("\n");
@@ -1717,7 +1638,7 @@ static void hdmiphy_poweron(struct hdmi_context *hdata)
1717 1638
1718static void hdmiphy_poweroff(struct hdmi_context *hdata) 1639static void hdmiphy_poweroff(struct hdmi_context *hdata)
1719{ 1640{
1720 if (hdata->type != HDMI_TYPE14) 1641 if (hdata->drv_data->type != HDMI_TYPE14)
1721 return; 1642 return;
1722 1643
1723 DRM_DEBUG_KMS("\n"); 1644 DRM_DEBUG_KMS("\n");
@@ -1743,13 +1664,14 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
1743 int i; 1664 int i;
1744 1665
1745 /* pixel clock */ 1666 /* pixel clock */
1746 i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock); 1667 i = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000);
1747 if (i < 0) { 1668 if (i < 0) {
1748 DRM_ERROR("failed to find hdmiphy conf\n"); 1669 DRM_ERROR("failed to find hdmiphy conf\n");
1749 return; 1670 return;
1750 } 1671 }
1751 1672
1752 ret = hdmiphy_reg_write_buf(hdata, 0, hdata->phy_confs[i].conf, 32); 1673 ret = hdmiphy_reg_write_buf(hdata, 0,
1674 hdata->drv_data->phy_confs[i].conf, 32);
1753 if (ret) { 1675 if (ret) {
1754 DRM_ERROR("failed to configure hdmiphy\n"); 1676 DRM_ERROR("failed to configure hdmiphy\n");
1755 return; 1677 return;
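
The PHY settings table now hangs off drv_data, and the pixel clock is derived from current_mode (DRM keeps mode->clock in kHz, hence the * 1000). The lookup itself is not in this hunk; a sketch of what it presumably does after this change:

static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
{
	int i;

	for (i = 0; i < hdata->drv_data->phy_conf_count; i++)
		if (hdata->drv_data->phy_confs[i].pixel_clock == pixel_clock)
			return i;

	return -EINVAL;	/* no PHY settings for this pixel clock */
}
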
@@ -1771,10 +1693,8 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
1771 hdmiphy_conf_reset(hdata); 1693 hdmiphy_conf_reset(hdata);
1772 hdmiphy_conf_apply(hdata); 1694 hdmiphy_conf_apply(hdata);
1773 1695
1774 mutex_lock(&hdata->hdmi_mutex);
1775 hdmi_start(hdata, false); 1696 hdmi_start(hdata, false);
1776 hdmi_conf_init(hdata); 1697 hdmi_conf_init(hdata);
1777 mutex_unlock(&hdata->hdmi_mutex);
1778 1698
1779 hdmi_audio_init(hdata); 1699 hdmi_audio_init(hdata);
1780 1700
@@ -1785,271 +1705,32 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
1785 hdmi_regs_dump(hdata, "start"); 1705 hdmi_regs_dump(hdata, "start");
1786} 1706}
1787 1707
1788static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value) 1708static void hdmi_mode_set(struct drm_encoder *encoder,
1789{ 1709 struct drm_display_mode *mode,
1790 int i; 1710 struct drm_display_mode *adjusted_mode)
1791 BUG_ON(num_bytes > 4);
1792 for (i = 0; i < num_bytes; i++)
1793 reg_pair[i] = (value >> (8 * i)) & 0xff;
1794}
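
For reference, the helper being deleted here only split a value into little-endian bytes for the register-pair arrays. For example, the three-byte H_V_LINE value it packed for 1080p timings (htotal = 2200, vtotal = 1125, as in the v13 path below):

u8 h_v_line[3];

hdmi_set_reg(h_v_line, 3, (2200 << 12) | 1125);	/* value = 0x898465 */
/* h_v_line[0] = 0x65, h_v_line[1] = 0x84, h_v_line[2] = 0x89 */
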
1795
1796static void hdmi_v13_mode_set(struct hdmi_context *hdata,
1797 struct drm_display_mode *m)
1798{ 1711{
1799 struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core; 1712 struct hdmi_context *hdata = encoder_to_hdmi(encoder);
1800 struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg; 1713 struct drm_display_mode *m = adjusted_mode;
1801 unsigned int val;
1802
1803 hdata->mode_conf.cea_video_id =
1804 drm_match_cea_mode((struct drm_display_mode *)m);
1805 hdata->mode_conf.pixel_clock = m->clock * 1000;
1806 hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
1807
1808 hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
1809 hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal);
1810
1811 val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
1812 hdmi_set_reg(core->vsync_pol, 1, val);
1813
1814 val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
1815 hdmi_set_reg(core->int_pro_mode, 1, val);
1816
1817 val = (m->hsync_start - m->hdisplay - 2);
1818 val |= ((m->hsync_end - m->hdisplay - 2) << 10);
1819 val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20;
1820 hdmi_set_reg(core->h_sync_gen, 3, val);
1821
1822 /*
1823 * Quirk of the exynos HDMI IP design: hsync_start and
1824 * hsync_end must be programmed 2 pixels less than the
1825 * calculated values.
1826 */
1827
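
A worked instance of the h_sync_gen packing above, using the CEA-861 1080p60 horizontal timings (hdisplay 1920, hsync_start 2008, hsync_end 2052, positive hsync):

val  = 2008 - 1920 - 2;		/* 86: sync start, minus the 2-pixel quirk */
val |= (2052 - 1920 - 2) << 10;	/* 130: sync end, same adjustment */
val |= 0 << 20;			/* positive hsync polarity */
/* val = 86 | (130 << 10) = 0x20856 */
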
1828 /* The following values & calculations differ between mode types */
1829 if (m->flags & DRM_MODE_FLAG_INTERLACE) {
1830 /* Interlaced Mode */
1831 val = ((m->vsync_end - m->vdisplay) / 2);
1832 val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
1833 hdmi_set_reg(core->v_sync_gen1, 3, val);
1834
1835 val = m->vtotal / 2;
1836 val |= ((m->vtotal - m->vdisplay) / 2) << 11;
1837 hdmi_set_reg(core->v_blank, 3, val);
1838
1839 val = (m->vtotal +
1840 ((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
1841 val |= m->vtotal << 11;
1842 hdmi_set_reg(core->v_blank_f, 3, val);
1843
1844 val = ((m->vtotal / 2) + 7);
1845 val |= ((m->vtotal / 2) + 2) << 12;
1846 hdmi_set_reg(core->v_sync_gen2, 3, val);
1847
1848 val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
1849 val |= ((m->htotal / 2) +
1850 (m->hsync_start - m->hdisplay)) << 12;
1851 hdmi_set_reg(core->v_sync_gen3, 3, val);
1852
1853 hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
1854 hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
1855
1856 hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/
1857 } else {
1858 /* Progressive Mode */
1859
1860 val = m->vtotal;
1861 val |= (m->vtotal - m->vdisplay) << 11;
1862 hdmi_set_reg(core->v_blank, 3, val);
1863
1864 hdmi_set_reg(core->v_blank_f, 3, 0);
1865
1866 val = (m->vsync_end - m->vdisplay);
1867 val |= ((m->vsync_start - m->vdisplay) << 12);
1868 hdmi_set_reg(core->v_sync_gen1, 3, val);
1869
1870 hdmi_set_reg(core->v_sync_gen2, 3, 0x1001);/* Reset value */
1871 hdmi_set_reg(core->v_sync_gen3, 3, 0x1001);/* Reset value */
1872 hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
1873 hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
1874 hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
1875 }
1876
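
To make the interlaced halving concrete: with CEA-861 1080i frame timings (vdisplay 1080, vtotal 1125), the interlaced branch above yields

vact_st = (1125 - 1080) / 2;	/* 22 blanking lines per field */
vact_sz = 1080 / 2;		/* 540 active lines per field */
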
1877 /* Timing generator registers */
1878 hdmi_set_reg(tg->cmd, 1, 0x0);
1879 hdmi_set_reg(tg->h_fsz, 2, m->htotal);
1880 hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
1881 hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
1882 hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
1883 hdmi_set_reg(tg->vsync, 2, 0x1);
1884 hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
1885 hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
1886 hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
1887 hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
1888 hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
1889 hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
1890 hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */
1891}
1892
1893static void hdmi_v14_mode_set(struct hdmi_context *hdata,
1894 struct drm_display_mode *m)
1895{
1896 struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
1897 struct hdmi_v14_core_regs *core =
1898 &hdata->mode_conf.conf.v14_conf.core;
1899
1900 hdata->mode_conf.cea_video_id =
1901 drm_match_cea_mode((struct drm_display_mode *)m);
1902 hdata->mode_conf.pixel_clock = m->clock * 1000;
1903 hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
1904
1905 hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
1906 hdmi_set_reg(core->v_line, 2, m->vtotal);
1907 hdmi_set_reg(core->h_line, 2, m->htotal);
1908 hdmi_set_reg(core->hsync_pol, 1,
1909 (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
1910 hdmi_set_reg(core->vsync_pol, 1,
1911 (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
1912 hdmi_set_reg(core->int_pro_mode, 1,
1913 (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
1914
1915 /*
1916 * Quirk of the exynos 5 HDMI IP design: hsync_start and
1917 * hsync_end must be programmed 2 pixels less than the
1918 * calculated values.
1919 */
1920
1921 /* The following values & calculations differ between mode types */
1922 if (m->flags & DRM_MODE_FLAG_INTERLACE) {
1923 /* Interlaced Mode */
1924 hdmi_set_reg(core->v_sync_line_bef_2, 2,
1925 (m->vsync_end - m->vdisplay) / 2);
1926 hdmi_set_reg(core->v_sync_line_bef_1, 2,
1927 (m->vsync_start - m->vdisplay) / 2);
1928 hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2);
1929 hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2);
1930 hdmi_set_reg(core->v_blank_f0, 2, m->vtotal - m->vdisplay / 2);
1931 hdmi_set_reg(core->v_blank_f1, 2, m->vtotal);
1932 hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7);
1933 hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2);
1934 hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2,
1935 (m->htotal / 2) + (m->hsync_start - m->hdisplay));
1936 hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2,
1937 (m->htotal / 2) + (m->hsync_start - m->hdisplay));
1938 hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
1939 hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
1940 hdmi_set_reg(tg->vact_st2, 2, m->vtotal - m->vdisplay / 2);
1941 hdmi_set_reg(tg->vsync2, 2, (m->vtotal / 2) + 1);
1942 hdmi_set_reg(tg->vsync_bot_hdmi, 2, (m->vtotal / 2) + 1);
1943 hdmi_set_reg(tg->field_bot_hdmi, 2, (m->vtotal / 2) + 1);
1944 hdmi_set_reg(tg->vact_st3, 2, 0x0);
1945 hdmi_set_reg(tg->vact_st4, 2, 0x0);
1946 } else {
1947 /* Progressive Mode */
1948 hdmi_set_reg(core->v_sync_line_bef_2, 2,
1949 m->vsync_end - m->vdisplay);
1950 hdmi_set_reg(core->v_sync_line_bef_1, 2,
1951 m->vsync_start - m->vdisplay);
1952 hdmi_set_reg(core->v2_blank, 2, m->vtotal);
1953 hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay);
1954 hdmi_set_reg(core->v_blank_f0, 2, 0xffff);
1955 hdmi_set_reg(core->v_blank_f1, 2, 0xffff);
1956 hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff);
1957 hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff);
1958 hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff);
1959 hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff);
1960 hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
1961 hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
1962 hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
1963 hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */
1964 hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */
1965 hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
1966 hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
1967 hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
1968 }
1969
1970 /* The following values & calculations are the same for all mode types */
1971 hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2);
1972 hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2);
1973 hdmi_set_reg(core->vact_space_1, 2, 0xffff);
1974 hdmi_set_reg(core->vact_space_2, 2, 0xffff);
1975 hdmi_set_reg(core->vact_space_3, 2, 0xffff);
1976 hdmi_set_reg(core->vact_space_4, 2, 0xffff);
1977 hdmi_set_reg(core->vact_space_5, 2, 0xffff);
1978 hdmi_set_reg(core->vact_space_6, 2, 0xffff);
1979 hdmi_set_reg(core->v_blank_f2, 2, 0xffff);
1980 hdmi_set_reg(core->v_blank_f3, 2, 0xffff);
1981 hdmi_set_reg(core->v_blank_f4, 2, 0xffff);
1982 hdmi_set_reg(core->v_blank_f5, 2, 0xffff);
1983 hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff);
1984 hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff);
1985 hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff);
1986 hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff);
1987 hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff);
1988 hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff);
1989 hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff);
1990 hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff);
1991
1992 /* Timing generator registers */
1993 hdmi_set_reg(tg->cmd, 1, 0x0);
1994 hdmi_set_reg(tg->h_fsz, 2, m->htotal);
1995 hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
1996 hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
1997 hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
1998 hdmi_set_reg(tg->vsync, 2, 0x1);
1999 hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
2000 hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
2001 hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
2002 hdmi_set_reg(tg->tg_3d, 1, 0x0);
2003}
2004
2005static void hdmi_mode_set(struct exynos_drm_display *display,
2006 struct drm_display_mode *mode)
2007{
2008 struct hdmi_context *hdata = display_to_hdmi(display);
2009 struct drm_display_mode *m = mode;
2010 1714
2011 DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n", 1715 DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
2012 m->hdisplay, m->vdisplay, 1716 m->hdisplay, m->vdisplay,
2013 m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1717 m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
2014 "INTERLACED" : "PROGRESSIVE"); 1718 "INTERLACED" : "PROGRESSIVE");
2015 1719
2016 /* preserve mode information for later use. */ 1720 drm_mode_copy(&hdata->current_mode, m);
2017 drm_mode_copy(&hdata->current_mode, mode); 1721 hdata->cea_video_id = drm_match_cea_mode(mode);
2018
2019 if (hdata->type == HDMI_TYPE13)
2020 hdmi_v13_mode_set(hdata, mode);
2021 else
2022 hdmi_v14_mode_set(hdata, mode);
2023}
2024
2025static void hdmi_commit(struct exynos_drm_display *display)
2026{
2027 struct hdmi_context *hdata = display_to_hdmi(display);
2028
2029 mutex_lock(&hdata->hdmi_mutex);
2030 if (!hdata->powered) {
2031 mutex_unlock(&hdata->hdmi_mutex);
2032 return;
2033 }
2034 mutex_unlock(&hdata->hdmi_mutex);
2035
2036 hdmi_conf_apply(hdata);
2037} 1722}
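
encoder_to_hdmi(), used throughout the converted callbacks, is not visible in this diff; presumably it is the usual container_of() wrapper, now that struct hdmi_context embeds its drm_encoder (see hdmi_bind below):

static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
{
	return container_of(e, struct hdmi_context, encoder);
}
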
2038 1723
2039static void hdmi_poweron(struct hdmi_context *hdata) 1724static void hdmi_enable(struct drm_encoder *encoder)
2040{ 1725{
1726 struct hdmi_context *hdata = encoder_to_hdmi(encoder);
2041 struct hdmi_resources *res = &hdata->res; 1727 struct hdmi_resources *res = &hdata->res;
2042 1728
2043 mutex_lock(&hdata->hdmi_mutex); 1729 if (hdata->powered)
2044 if (hdata->powered) {
2045 mutex_unlock(&hdata->hdmi_mutex);
2046 return; 1730 return;
2047 }
2048 1731
2049 hdata->powered = true; 1732 hdata->powered = true;
2050 1733
2051 mutex_unlock(&hdata->hdmi_mutex);
2052
2053 pm_runtime_get_sync(hdata->dev); 1734 pm_runtime_get_sync(hdata->dev);
2054 1735
2055 if (regulator_bulk_enable(res->regul_count, res->regul_bulk)) 1736 if (regulator_bulk_enable(res->regul_count, res->regul_bulk))
@@ -2063,17 +1744,32 @@ static void hdmi_poweron(struct hdmi_context *hdata)
2063 clk_prepare_enable(res->sclk_hdmi); 1744 clk_prepare_enable(res->sclk_hdmi);
2064 1745
2065 hdmiphy_poweron(hdata); 1746 hdmiphy_poweron(hdata);
2066 hdmi_commit(&hdata->display); 1747 hdmi_conf_apply(hdata);
2067} 1748}
2068 1749
2069static void hdmi_poweroff(struct hdmi_context *hdata) 1750static void hdmi_disable(struct drm_encoder *encoder)
2070{ 1751{
1752 struct hdmi_context *hdata = encoder_to_hdmi(encoder);
2071 struct hdmi_resources *res = &hdata->res; 1753 struct hdmi_resources *res = &hdata->res;
1754 struct drm_crtc *crtc = encoder->crtc;
1755 const struct drm_crtc_helper_funcs *funcs = NULL;
2072 1756
2073 mutex_lock(&hdata->hdmi_mutex);
2074 if (!hdata->powered) 1757 if (!hdata->powered)
2075 goto out; 1758 return;
2076 mutex_unlock(&hdata->hdmi_mutex); 1759
1760 /*
1761 * The SFRs of the VP and Mixer are updated by the Vertical Sync
1762 * of the Timing generator, which is a part of HDMI, so the
1763 * sequence to disable the TV Subsystem should be as follows:
1764 * VP -> Mixer -> HDMI
1765 *
1766 * The code below disables the Mixer and VP (if used) prior to
1767 * disabling HDMI.
1768 */
1769 if (crtc)
1770 funcs = crtc->helper_private;
1771 if (funcs && funcs->disable)
1772 (*funcs->disable)(crtc);
2077 1773
2078 /* HDMI System Disable */ 1774 /* HDMI System Disable */
2079 hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN); 1775 hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
@@ -2093,57 +1789,18 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
2093 1789
2094 pm_runtime_put_sync(hdata->dev); 1790 pm_runtime_put_sync(hdata->dev);
2095 1791
2096 mutex_lock(&hdata->hdmi_mutex);
2097 hdata->powered = false; 1792 hdata->powered = false;
2098
2099out:
2100 mutex_unlock(&hdata->hdmi_mutex);
2101} 1793}
2102 1794
2103static void hdmi_dpms(struct exynos_drm_display *display, int mode) 1795static struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
2104{
2105 struct hdmi_context *hdata = display_to_hdmi(display);
2106 struct drm_encoder *encoder = hdata->encoder;
2107 struct drm_crtc *crtc = encoder->crtc;
2108 const struct drm_crtc_helper_funcs *funcs = NULL;
2109
2110 DRM_DEBUG_KMS("mode %d\n", mode);
2111
2112 switch (mode) {
2113 case DRM_MODE_DPMS_ON:
2114 hdmi_poweron(hdata);
2115 break;
2116 case DRM_MODE_DPMS_STANDBY:
2117 case DRM_MODE_DPMS_SUSPEND:
2118 case DRM_MODE_DPMS_OFF:
2119 /*
2120 * The SFRs of the VP and Mixer are updated by the Vertical Sync
2121 * of the Timing generator, which is a part of HDMI, so the
2122 * sequence to disable the TV Subsystem should be as follows:
2123 * VP -> Mixer -> HDMI
2124 *
2125 * The code below disables the Mixer and VP (if used) prior to
2126 * disabling HDMI.
2127 */
2128 if (crtc)
2129 funcs = crtc->helper_private;
2130 if (funcs && funcs->disable)
2131 (*funcs->disable)(crtc);
2132
2133 hdmi_poweroff(hdata);
2134 break;
2135 default:
2136 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
2137 break;
2138 }
2139}
2140
2141static struct exynos_drm_display_ops hdmi_display_ops = {
2142 .create_connector = hdmi_create_connector,
2143 .mode_fixup = hdmi_mode_fixup, 1796 .mode_fixup = hdmi_mode_fixup,
2144 .mode_set = hdmi_mode_set, 1797 .mode_set = hdmi_mode_set,
2145 .dpms = hdmi_dpms, 1798 .enable = hdmi_enable,
2146 .commit = hdmi_commit, 1799 .disable = hdmi_disable,
1800};
1801
1802static struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
1803 .destroy = drm_encoder_cleanup,
2147}; 1804};
2148 1805
2149static void hdmi_hotplug_work_func(struct work_struct *work) 1806static void hdmi_hotplug_work_func(struct work_struct *work)
@@ -2152,10 +1809,6 @@ static void hdmi_hotplug_work_func(struct work_struct *work)
2152 1809
2153 hdata = container_of(work, struct hdmi_context, hotplug_work.work); 1810 hdata = container_of(work, struct hdmi_context, hotplug_work.work);
2154 1811
2155 mutex_lock(&hdata->hdmi_mutex);
2156 hdata->hpd = gpio_get_value(hdata->hpd_gpio);
2157 mutex_unlock(&hdata->hdmi_mutex);
2158
2159 if (hdata->drm_dev) 1812 if (hdata->drm_dev)
2160 drm_helper_hpd_irq_event(hdata->drm_dev); 1813 drm_helper_hpd_irq_event(hdata->drm_dev);
2161} 1814}
@@ -2254,30 +1907,6 @@ fail:
2254 return ret; 1907 return ret;
2255} 1908}
2256 1909
2257static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
2258 (struct device *dev)
2259{
2260 struct device_node *np = dev->of_node;
2261 struct s5p_hdmi_platform_data *pd;
2262 u32 value;
2263
2264 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
2265 if (!pd)
2266 goto err_data;
2267
2268 if (!of_find_property(np, "hpd-gpio", &value)) {
2269 DRM_ERROR("no hpd gpio property found\n");
2270 goto err_data;
2271 }
2272
2273 pd->hpd_gpio = of_get_named_gpio(np, "hpd-gpio", 0);
2274
2275 return pd;
2276
2277err_data:
2278 return NULL;
2279}
2280
2281static struct of_device_id hdmi_match_types[] = { 1910static struct of_device_id hdmi_match_types[] = {
2282 { 1911 {
2283 .compatible = "samsung,exynos5-hdmi", 1912 .compatible = "samsung,exynos5-hdmi",
@@ -2301,10 +1930,33 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
2301{ 1930{
2302 struct drm_device *drm_dev = data; 1931 struct drm_device *drm_dev = data;
2303 struct hdmi_context *hdata = dev_get_drvdata(dev); 1932 struct hdmi_context *hdata = dev_get_drvdata(dev);
1933 struct drm_encoder *encoder = &hdata->encoder;
1934 int ret, pipe;
2304 1935
2305 hdata->drm_dev = drm_dev; 1936 hdata->drm_dev = drm_dev;
2306 1937
2307 return exynos_drm_create_enc_conn(drm_dev, &hdata->display); 1938 pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
1939 EXYNOS_DISPLAY_TYPE_HDMI);
1940 if (pipe < 0)
1941 return pipe;
1942
1943 encoder->possible_crtcs = 1 << pipe;
1944
1945 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
1946
1947 drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
1948 DRM_MODE_ENCODER_TMDS);
1949
1950 drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
1951
1952 ret = hdmi_create_connector(encoder);
1953 if (ret) {
1954 DRM_ERROR("failed to create connector ret = %d\n", ret);
1955 drm_encoder_cleanup(encoder);
1956 return ret;
1957 }
1958
1959 return 0;
2308} 1960}
2309 1961
2310static void hdmi_unbind(struct device *dev, struct device *master, void *data) 1962static void hdmi_unbind(struct device *dev, struct device *master, void *data)
@@ -2338,43 +1990,30 @@ static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev)
2338static int hdmi_probe(struct platform_device *pdev) 1990static int hdmi_probe(struct platform_device *pdev)
2339{ 1991{
2340 struct device_node *ddc_node, *phy_node; 1992 struct device_node *ddc_node, *phy_node;
2341 struct s5p_hdmi_platform_data *pdata;
2342 struct hdmi_driver_data *drv_data;
2343 const struct of_device_id *match; 1993 const struct of_device_id *match;
2344 struct device *dev = &pdev->dev; 1994 struct device *dev = &pdev->dev;
2345 struct hdmi_context *hdata; 1995 struct hdmi_context *hdata;
2346 struct resource *res; 1996 struct resource *res;
2347 int ret; 1997 int ret;
2348 1998
2349 if (!dev->of_node)
2350 return -ENODEV;
2351
2352 pdata = drm_hdmi_dt_parse_pdata(dev);
2353 if (!pdata)
2354 return -EINVAL;
2355
2356 hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL); 1999 hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
2357 if (!hdata) 2000 if (!hdata)
2358 return -ENOMEM; 2001 return -ENOMEM;
2359 2002
2360 hdata->display.type = EXYNOS_DISPLAY_TYPE_HDMI; 2003 match = of_match_device(hdmi_match_types, dev);
2361 hdata->display.ops = &hdmi_display_ops;
2362
2363 mutex_init(&hdata->hdmi_mutex);
2364
2365 platform_set_drvdata(pdev, hdata);
2366
2367 match = of_match_node(hdmi_match_types, dev->of_node);
2368 if (!match) 2004 if (!match)
2369 return -ENODEV; 2005 return -ENODEV;
2370 2006
2371 drv_data = (struct hdmi_driver_data *)match->data; 2007 hdata->drv_data = match->data;
2372 hdata->type = drv_data->type; 2008
2373 hdata->phy_confs = drv_data->phy_confs; 2009 platform_set_drvdata(pdev, hdata);
2374 hdata->phy_conf_count = drv_data->phy_conf_count;
2375 2010
2376 hdata->hpd_gpio = pdata->hpd_gpio;
2377 hdata->dev = dev; 2011 hdata->dev = dev;
2012 hdata->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpio", 0);
2013 if (hdata->hpd_gpio < 0) {
2014 DRM_ERROR("cannot get hpd gpio property\n");
2015 return hdata->hpd_gpio;
2016 }
2378 2017
2379 ret = hdmi_resources_init(hdata); 2018 ret = hdmi_resources_init(hdata);
2380 if (ret) { 2019 if (ret) {
@@ -2426,7 +2065,7 @@ out_get_ddc_adpt:
2426 } 2065 }
2427 2066
2428out_get_phy_port: 2067out_get_phy_port:
2429 if (drv_data->is_apb_phy) { 2068 if (hdata->drv_data->is_apb_phy) {
2430 hdata->regs_hdmiphy = of_iomap(phy_node, 0); 2069 hdata->regs_hdmiphy = of_iomap(phy_node, 0);
2431 if (!hdata->regs_hdmiphy) { 2070 if (!hdata->regs_hdmiphy) {
2432 DRM_ERROR("failed to ioremap hdmi phy\n"); 2071 DRM_ERROR("failed to ioremap hdmi phy\n");
@@ -2449,8 +2088,6 @@ out_get_phy_port:
2449 goto err_hdmiphy; 2088 goto err_hdmiphy;
2450 } 2089 }
2451 2090
2452 hdata->hpd = gpio_get_value(hdata->hpd_gpio);
2453
2454 INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func); 2091 INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func);
2455 2092
2456 ret = devm_request_threaded_irq(dev, hdata->irq, NULL, 2093 ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
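
With the cached hpd value gone, hotplug state is sampled on demand and the threaded IRQ only has to kick the delayed work. The handler is outside this hunk; a plausible sketch, assuming the usual debounce-by-delayed-work pattern (HOTPLUG_DEBOUNCE_MS is a driver-local constant, not shown in this excerpt):

static irqreturn_t hdmi_irq_thread(int irq, void *arg)
{
	struct hdmi_context *hdata = arg;

	/* debounce: (re)arm the hotplug work instead of acting in IRQ context */
	mod_delayed_work(system_wq, &hdata->hotplug_work,
			 msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));

	return IRQ_HANDLED;
}
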
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index cae98db33062..e68340c77676 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -69,6 +69,11 @@ enum mixer_version_id {
69 MXR_VER_128_0_0_184, 69 MXR_VER_128_0_0_184,
70}; 70};
71 71
72enum mixer_flag_bits {
73 MXR_BIT_POWERED,
74 MXR_BIT_VSYNC,
75};
76
72struct mixer_context { 77struct mixer_context {
73 struct platform_device *pdev; 78 struct platform_device *pdev;
74 struct device *dev; 79 struct device *dev;
@@ -76,13 +81,11 @@ struct mixer_context {
76 struct exynos_drm_crtc *crtc; 81 struct exynos_drm_crtc *crtc;
77 struct exynos_drm_plane planes[MIXER_WIN_NR]; 82 struct exynos_drm_plane planes[MIXER_WIN_NR];
78 int pipe; 83 int pipe;
84 unsigned long flags;
79 bool interlace; 85 bool interlace;
80 bool powered;
81 bool vp_enabled; 86 bool vp_enabled;
82 bool has_sclk; 87 bool has_sclk;
83 u32 int_en;
84 88
85 struct mutex mixer_mutex;
86 struct mixer_resources mixer_res; 89 struct mixer_resources mixer_res;
87 enum mixer_version_id mxr_ver; 90 enum mixer_version_id mxr_ver;
88 wait_queue_head_t wait_vsync_queue; 91 wait_queue_head_t wait_vsync_queue;
@@ -380,19 +383,20 @@ static void mixer_stop(struct mixer_context *ctx)
380 usleep_range(10000, 12000); 383 usleep_range(10000, 12000);
381} 384}
382 385
383static void vp_video_buffer(struct mixer_context *ctx, unsigned int win) 386static void vp_video_buffer(struct mixer_context *ctx,
387 struct exynos_drm_plane *plane)
384{ 388{
385 struct mixer_resources *res = &ctx->mixer_res; 389 struct mixer_resources *res = &ctx->mixer_res;
390 struct drm_plane_state *state = plane->base.state;
391 struct drm_framebuffer *fb = state->fb;
392 struct drm_display_mode *mode = &state->crtc->mode;
386 unsigned long flags; 393 unsigned long flags;
387 struct exynos_drm_plane *plane;
388 dma_addr_t luma_addr[2], chroma_addr[2]; 394 dma_addr_t luma_addr[2], chroma_addr[2];
389 bool tiled_mode = false; 395 bool tiled_mode = false;
390 bool crcb_mode = false; 396 bool crcb_mode = false;
391 u32 val; 397 u32 val;
392 398
393 plane = &ctx->planes[win]; 399 switch (fb->pixel_format) {
394
395 switch (plane->pixel_format) {
396 case DRM_FORMAT_NV12: 400 case DRM_FORMAT_NV12:
397 crcb_mode = false; 401 crcb_mode = false;
398 break; 402 break;
@@ -401,21 +405,21 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
401 break; 405 break;
402 default: 406 default:
403 DRM_ERROR("pixel format for vp is wrong [%d].\n", 407 DRM_ERROR("pixel format for vp is wrong [%d].\n",
404 plane->pixel_format); 408 fb->pixel_format);
405 return; 409 return;
406 } 410 }
407 411
408 luma_addr[0] = plane->dma_addr[0]; 412 luma_addr[0] = plane->dma_addr[0];
409 chroma_addr[0] = plane->dma_addr[1]; 413 chroma_addr[0] = plane->dma_addr[1];
410 414
411 if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) { 415 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
412 ctx->interlace = true; 416 ctx->interlace = true;
413 if (tiled_mode) { 417 if (tiled_mode) {
414 luma_addr[1] = luma_addr[0] + 0x40; 418 luma_addr[1] = luma_addr[0] + 0x40;
415 chroma_addr[1] = chroma_addr[0] + 0x40; 419 chroma_addr[1] = chroma_addr[0] + 0x40;
416 } else { 420 } else {
417 luma_addr[1] = luma_addr[0] + plane->pitch; 421 luma_addr[1] = luma_addr[0] + fb->pitches[0];
418 chroma_addr[1] = chroma_addr[0] + plane->pitch; 422 chroma_addr[1] = chroma_addr[0] + fb->pitches[0];
419 } 423 }
420 } else { 424 } else {
421 ctx->interlace = false; 425 ctx->interlace = false;
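
For example, with a linear 1920-wide NV12 buffer (fb->pitches[0] == 1920), the interlaced branch above places the bottom field one luma line into the buffer:

luma_addr[1]   = luma_addr[0]   + 1920;	/* one luma line down */
chroma_addr[1] = chroma_addr[0] + 1920;	/* same shift in the CbCr plane */
/* the tiled layout instead offsets by 0x40, one 64-byte tile row */
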
@@ -436,25 +440,25 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
436 vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK); 440 vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
437 441
438 /* setting size of input image */ 442 /* setting size of input image */
439 vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(plane->pitch) | 443 vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
440 VP_IMG_VSIZE(plane->fb_height)); 444 VP_IMG_VSIZE(fb->height));
441 /* chroma height has to be reduced by 2 to avoid chroma distortions */ 445
442 vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(plane->pitch) | 446 vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
443 VP_IMG_VSIZE(plane->fb_height / 2)); 447 VP_IMG_VSIZE(fb->height / 2));
444 448
445 vp_reg_write(res, VP_SRC_WIDTH, plane->src_width); 449 vp_reg_write(res, VP_SRC_WIDTH, plane->src_w);
446 vp_reg_write(res, VP_SRC_HEIGHT, plane->src_height); 450 vp_reg_write(res, VP_SRC_HEIGHT, plane->src_h);
447 vp_reg_write(res, VP_SRC_H_POSITION, 451 vp_reg_write(res, VP_SRC_H_POSITION,
448 VP_SRC_H_POSITION_VAL(plane->src_x)); 452 VP_SRC_H_POSITION_VAL(plane->src_x));
449 vp_reg_write(res, VP_SRC_V_POSITION, plane->src_y); 453 vp_reg_write(res, VP_SRC_V_POSITION, plane->src_y);
450 454
451 vp_reg_write(res, VP_DST_WIDTH, plane->crtc_width); 455 vp_reg_write(res, VP_DST_WIDTH, plane->crtc_w);
452 vp_reg_write(res, VP_DST_H_POSITION, plane->crtc_x); 456 vp_reg_write(res, VP_DST_H_POSITION, plane->crtc_x);
453 if (ctx->interlace) { 457 if (ctx->interlace) {
454 vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height / 2); 458 vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h / 2);
455 vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y / 2); 459 vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y / 2);
456 } else { 460 } else {
457 vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height); 461 vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h);
458 vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y); 462 vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y);
459 } 463 }
460 464
@@ -469,9 +473,9 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
469 vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]); 473 vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
470 vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]); 474 vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
471 475
472 mixer_cfg_scan(ctx, plane->mode_height); 476 mixer_cfg_scan(ctx, mode->vdisplay);
473 mixer_cfg_rgb_fmt(ctx, plane->mode_height); 477 mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
474 mixer_cfg_layer(ctx, win, true); 478 mixer_cfg_layer(ctx, plane->zpos, true);
475 mixer_run(ctx); 479 mixer_run(ctx);
476 480
477 mixer_vsync_set_update(ctx, true); 481 mixer_vsync_set_update(ctx, true);
@@ -491,15 +495,15 @@ static void mixer_layer_update(struct mixer_context *ctx)
491static int mixer_setup_scale(const struct exynos_drm_plane *plane, 495static int mixer_setup_scale(const struct exynos_drm_plane *plane,
492 unsigned int *x_ratio, unsigned int *y_ratio) 496 unsigned int *x_ratio, unsigned int *y_ratio)
493{ 497{
494 if (plane->crtc_width != plane->src_width) { 498 if (plane->crtc_w != plane->src_w) {
495 if (plane->crtc_width == 2 * plane->src_width) 499 if (plane->crtc_w == 2 * plane->src_w)
496 *x_ratio = 1; 500 *x_ratio = 1;
497 else 501 else
498 goto fail; 502 goto fail;
499 } 503 }
500 504
501 if (plane->crtc_height != plane->src_height) { 505 if (plane->crtc_h != plane->src_h) {
502 if (plane->crtc_height == 2 * plane->src_height) 506 if (plane->crtc_h == 2 * plane->src_h)
503 *y_ratio = 1; 507 *y_ratio = 1;
504 else 508 else
505 goto fail; 509 goto fail;
@@ -512,20 +516,22 @@ fail:
512 return -ENOTSUPP; 516 return -ENOTSUPP;
513} 517}
514 518
515static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win) 519static void mixer_graph_buffer(struct mixer_context *ctx,
520 struct exynos_drm_plane *plane)
516{ 521{
517 struct mixer_resources *res = &ctx->mixer_res; 522 struct mixer_resources *res = &ctx->mixer_res;
523 struct drm_plane_state *state = plane->base.state;
524 struct drm_framebuffer *fb = state->fb;
525 struct drm_display_mode *mode = &state->crtc->mode;
518 unsigned long flags; 526 unsigned long flags;
519 struct exynos_drm_plane *plane; 527 unsigned int win = plane->zpos;
520 unsigned int x_ratio = 0, y_ratio = 0; 528 unsigned int x_ratio = 0, y_ratio = 0;
521 unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset; 529 unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
522 dma_addr_t dma_addr; 530 dma_addr_t dma_addr;
523 unsigned int fmt; 531 unsigned int fmt;
524 u32 val; 532 u32 val;
525 533
526 plane = &ctx->planes[win]; 534 switch (fb->pixel_format) {
527
528 switch (plane->pixel_format) {
529 case DRM_FORMAT_XRGB4444: 535 case DRM_FORMAT_XRGB4444:
530 fmt = MXR_FORMAT_ARGB4444; 536 fmt = MXR_FORMAT_ARGB4444;
531 break; 537 break;
@@ -557,12 +563,12 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
557 563
558 /* converting dma address base and source offset */ 564 /* converting dma address base and source offset */
559 dma_addr = plane->dma_addr[0] 565 dma_addr = plane->dma_addr[0]
560 + (plane->src_x * plane->bpp >> 3) 566 + (plane->src_x * fb->bits_per_pixel >> 3)
561 + (plane->src_y * plane->pitch); 567 + (plane->src_y * fb->pitches[0]);
562 src_x_offset = 0; 568 src_x_offset = 0;
563 src_y_offset = 0; 569 src_y_offset = 0;
564 570
565 if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) 571 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
566 ctx->interlace = true; 572 ctx->interlace = true;
567 else 573 else
568 ctx->interlace = false; 574 ctx->interlace = false;
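
The base-address arithmetic above, worked through for an XRGB8888 framebuffer (bits_per_pixel 32, pitch 7680 bytes) cropped at src_x = 16, src_y = 8:

dma_addr = plane->dma_addr[0]
	 + (16 * 32 >> 3)	/* 64-byte horizontal offset */
	 + (8 * 7680);		/* 61440-byte vertical offset */
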
@@ -576,18 +582,18 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
576 582
577 /* setup geometry */ 583 /* setup geometry */
578 mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), 584 mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
579 plane->pitch / (plane->bpp >> 3)); 585 fb->pitches[0] / (fb->bits_per_pixel >> 3));
580 586
581 /* setup display size */ 587 /* setup display size */
582 if (ctx->mxr_ver == MXR_VER_128_0_0_184 && 588 if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
583 win == MIXER_DEFAULT_WIN) { 589 win == MIXER_DEFAULT_WIN) {
584 val = MXR_MXR_RES_HEIGHT(plane->mode_height); 590 val = MXR_MXR_RES_HEIGHT(mode->vdisplay);
585 val |= MXR_MXR_RES_WIDTH(plane->mode_width); 591 val |= MXR_MXR_RES_WIDTH(mode->hdisplay);
586 mixer_reg_write(res, MXR_RESOLUTION, val); 592 mixer_reg_write(res, MXR_RESOLUTION, val);
587 } 593 }
588 594
589 val = MXR_GRP_WH_WIDTH(plane->src_width); 595 val = MXR_GRP_WH_WIDTH(plane->src_w);
590 val |= MXR_GRP_WH_HEIGHT(plane->src_height); 596 val |= MXR_GRP_WH_HEIGHT(plane->src_h);
591 val |= MXR_GRP_WH_H_SCALE(x_ratio); 597 val |= MXR_GRP_WH_H_SCALE(x_ratio);
592 val |= MXR_GRP_WH_V_SCALE(y_ratio); 598 val |= MXR_GRP_WH_V_SCALE(y_ratio);
593 mixer_reg_write(res, MXR_GRAPHIC_WH(win), val); 599 mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -605,8 +611,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
605 /* set buffer address to mixer */ 611 /* set buffer address to mixer */
606 mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr); 612 mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
607 613
608 mixer_cfg_scan(ctx, plane->mode_height); 614 mixer_cfg_scan(ctx, mode->vdisplay);
609 mixer_cfg_rgb_fmt(ctx, plane->mode_height); 615 mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
610 mixer_cfg_layer(ctx, win, true); 616 mixer_cfg_layer(ctx, win, true);
611 617
612 /* layer update mandatory for mixer 16.0.33.0 */ 618 /* layer update mandatory for mixer 16.0.33.0 */
@@ -718,6 +724,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
718 724
719 /* handling VSYNC */ 725 /* handling VSYNC */
720 if (val & MXR_INT_STATUS_VSYNC) { 726 if (val & MXR_INT_STATUS_VSYNC) {
727 /* the vsync interrupt uses different bits for read and clear */
728 val |= MXR_INT_CLEAR_VSYNC;
729 val &= ~MXR_INT_STATUS_VSYNC;
730
721 /* interlace scan need to check shadow register */ 731 /* interlace scan need to check shadow register */
722 if (ctx->interlace) { 732 if (ctx->interlace) {
723 base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0)); 733 base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
@@ -731,8 +741,8 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
731 goto out; 741 goto out;
732 } 742 }
733 743
734 drm_handle_vblank(ctx->drm_dev, ctx->pipe); 744 drm_crtc_handle_vblank(&ctx->crtc->base);
735 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); 745 exynos_drm_crtc_finish_pageflip(ctx->crtc);
736 746
737 /* set wait vsync event to zero and wake up queue. */ 747 /* set wait vsync event to zero and wake up queue. */
738 if (atomic_read(&ctx->wait_vsync_event)) { 748 if (atomic_read(&ctx->wait_vsync_event)) {
@@ -743,11 +753,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
743 753
744out: 754out:
745 /* clear interrupts */ 755 /* clear interrupts */
746 if (~val & MXR_INT_EN_VSYNC) {
747 /* the vsync interrupt uses different bits for read and clear */
748 val &= ~MXR_INT_EN_VSYNC;
749 val |= MXR_INT_CLEAR_VSYNC;
750 }
751 mixer_reg_write(res, MXR_INT_STATUS, val); 756 mixer_reg_write(res, MXR_INT_STATUS, val);
752 757
753 spin_unlock(&res->reg_slock); 758 spin_unlock(&res->reg_slock);
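
Condensed, the corrected acknowledge path reads the status, replaces the status bit with the dedicated clear bit, and writes the value back; this is a restatement of the hunk above, not new driver code:

val = mixer_reg_read(res, MXR_INT_STATUS);
if (val & MXR_INT_STATUS_VSYNC) {
	val &= ~MXR_INT_STATUS_VSYNC;	/* reporting bit only */
	val |= MXR_INT_CLEAR_VSYNC;	/* write-to-clear bit */
}
mixer_reg_write(res, MXR_INT_STATUS, val);
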
@@ -882,8 +887,7 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
882 } 887 }
883 } 888 }
884 889
885 ret = drm_iommu_attach_device_if_possible(mixer_ctx->crtc, drm_dev, 890 ret = drm_iommu_attach_device(drm_dev, mixer_ctx->dev);
886 mixer_ctx->dev);
887 if (ret) 891 if (ret)
888 priv->pipe--; 892 priv->pipe--;
889 893
@@ -892,8 +896,7 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
892 896
893static void mixer_ctx_remove(struct mixer_context *mixer_ctx) 897static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
894{ 898{
895 if (is_drm_iommu_supported(mixer_ctx->drm_dev)) 899 drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
896 drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
897} 900}
898 901
899static int mixer_enable_vblank(struct exynos_drm_crtc *crtc) 902static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
@@ -901,14 +904,13 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
901 struct mixer_context *mixer_ctx = crtc->ctx; 904 struct mixer_context *mixer_ctx = crtc->ctx;
902 struct mixer_resources *res = &mixer_ctx->mixer_res; 905 struct mixer_resources *res = &mixer_ctx->mixer_res;
903 906
904 if (!mixer_ctx->powered) { 907 __set_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
905 mixer_ctx->int_en |= MXR_INT_EN_VSYNC; 908 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
906 return 0; 909 return 0;
907 }
908 910
909 /* enable vsync interrupt */ 911 /* enable vsync interrupt */
910 mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC, 912 mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
911 MXR_INT_EN_VSYNC); 913 mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
912 914
913 return 0; 915 return 0;
914} 916}
@@ -918,48 +920,48 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
918 struct mixer_context *mixer_ctx = crtc->ctx; 920 struct mixer_context *mixer_ctx = crtc->ctx;
919 struct mixer_resources *res = &mixer_ctx->mixer_res; 921 struct mixer_resources *res = &mixer_ctx->mixer_res;
920 922
923 __clear_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
924
925 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
926 return;
927
921 /* disable vsync interrupt */ 928 /* disable vsync interrupt */
929 mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
922 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC); 930 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
923} 931}
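
The pattern applied across these hunks: the mutex-protected powered/vsync bools become bits in mixer_context.flags, manipulated with the <linux/bitops.h> helpers, which are atomic per bit and safe to test from interrupt context without a lock. In shorthand:

if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
	return;				/* lock-free "is it powered?" check */
set_bit(MXR_BIT_POWERED, &ctx->flags);	/* atomic set, no mutex required */
__set_bit(MXR_BIT_VSYNC, &ctx->flags);	/* non-atomic variant for already-serialized paths */
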
924 932
925static void mixer_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) 933static void mixer_update_plane(struct exynos_drm_crtc *crtc,
934 struct exynos_drm_plane *plane)
926{ 935{
927 struct mixer_context *mixer_ctx = crtc->ctx; 936 struct mixer_context *mixer_ctx = crtc->ctx;
928 937
929 DRM_DEBUG_KMS("win: %d\n", win); 938 DRM_DEBUG_KMS("win: %d\n", plane->zpos);
930 939
931 mutex_lock(&mixer_ctx->mixer_mutex); 940 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
932 if (!mixer_ctx->powered) {
933 mutex_unlock(&mixer_ctx->mixer_mutex);
934 return; 941 return;
935 }
936 mutex_unlock(&mixer_ctx->mixer_mutex);
937 942
938 if (win > 1 && mixer_ctx->vp_enabled) 943 if (plane->zpos > 1 && mixer_ctx->vp_enabled)
939 vp_video_buffer(mixer_ctx, win); 944 vp_video_buffer(mixer_ctx, plane);
940 else 945 else
941 mixer_graph_buffer(mixer_ctx, win); 946 mixer_graph_buffer(mixer_ctx, plane);
942} 947}
943 948
944static void mixer_win_disable(struct exynos_drm_crtc *crtc, unsigned int win) 949static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
950 struct exynos_drm_plane *plane)
945{ 951{
946 struct mixer_context *mixer_ctx = crtc->ctx; 952 struct mixer_context *mixer_ctx = crtc->ctx;
947 struct mixer_resources *res = &mixer_ctx->mixer_res; 953 struct mixer_resources *res = &mixer_ctx->mixer_res;
948 unsigned long flags; 954 unsigned long flags;
949 955
950 DRM_DEBUG_KMS("win: %d\n", win); 956 DRM_DEBUG_KMS("win: %d\n", plane->zpos);
951 957
952 mutex_lock(&mixer_ctx->mixer_mutex); 958 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
953 if (!mixer_ctx->powered) {
954 mutex_unlock(&mixer_ctx->mixer_mutex);
955 return; 959 return;
956 }
957 mutex_unlock(&mixer_ctx->mixer_mutex);
958 960
959 spin_lock_irqsave(&res->reg_slock, flags); 961 spin_lock_irqsave(&res->reg_slock, flags);
960 mixer_vsync_set_update(mixer_ctx, false); 962 mixer_vsync_set_update(mixer_ctx, false);
961 963
962 mixer_cfg_layer(mixer_ctx, win, false); 964 mixer_cfg_layer(mixer_ctx, plane->zpos, false);
963 965
964 mixer_vsync_set_update(mixer_ctx, true); 966 mixer_vsync_set_update(mixer_ctx, true);
965 spin_unlock_irqrestore(&res->reg_slock, flags); 967 spin_unlock_irqrestore(&res->reg_slock, flags);
@@ -970,12 +972,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
970 struct mixer_context *mixer_ctx = crtc->ctx; 972 struct mixer_context *mixer_ctx = crtc->ctx;
971 int err; 973 int err;
972 974
973 mutex_lock(&mixer_ctx->mixer_mutex); 975 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
974 if (!mixer_ctx->powered) {
975 mutex_unlock(&mixer_ctx->mixer_mutex);
976 return; 976 return;
977 }
978 mutex_unlock(&mixer_ctx->mixer_mutex);
979 977
980 err = drm_vblank_get(mixer_ctx->drm_dev, mixer_ctx->pipe); 978 err = drm_vblank_get(mixer_ctx->drm_dev, mixer_ctx->pipe);
981 if (err < 0) { 979 if (err < 0) {
@@ -1003,13 +1001,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
1003 struct mixer_resources *res = &ctx->mixer_res; 1001 struct mixer_resources *res = &ctx->mixer_res;
1004 int ret; 1002 int ret;
1005 1003
1006 mutex_lock(&ctx->mixer_mutex); 1004 if (test_bit(MXR_BIT_POWERED, &ctx->flags))
1007 if (ctx->powered) {
1008 mutex_unlock(&ctx->mixer_mutex);
1009 return; 1005 return;
1010 }
1011
1012 mutex_unlock(&ctx->mixer_mutex);
1013 1006
1014 pm_runtime_get_sync(ctx->dev); 1007 pm_runtime_get_sync(ctx->dev);
1015 1008
@@ -1041,13 +1034,14 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
1041 } 1034 }
1042 } 1035 }
1043 1036
1044 mutex_lock(&ctx->mixer_mutex); 1037 set_bit(MXR_BIT_POWERED, &ctx->flags);
1045 ctx->powered = true;
1046 mutex_unlock(&ctx->mixer_mutex);
1047 1038
1048 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); 1039 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
1049 1040
1050 mixer_reg_write(res, MXR_INT_EN, ctx->int_en); 1041 if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) {
1042 mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
1043 mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
1044 }
1051 mixer_win_reset(ctx); 1045 mixer_win_reset(ctx);
1052} 1046}
1053 1047
@@ -1057,24 +1051,16 @@ static void mixer_disable(struct exynos_drm_crtc *crtc)
1057 struct mixer_resources *res = &ctx->mixer_res; 1051 struct mixer_resources *res = &ctx->mixer_res;
1058 int i; 1052 int i;
1059 1053
1060 mutex_lock(&ctx->mixer_mutex); 1054 if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
1061 if (!ctx->powered) {
1062 mutex_unlock(&ctx->mixer_mutex);
1063 return; 1055 return;
1064 }
1065 mutex_unlock(&ctx->mixer_mutex);
1066 1056
1067 mixer_stop(ctx); 1057 mixer_stop(ctx);
1068 mixer_regs_dump(ctx); 1058 mixer_regs_dump(ctx);
1069 1059
1070 for (i = 0; i < MIXER_WIN_NR; i++) 1060 for (i = 0; i < MIXER_WIN_NR; i++)
1071 mixer_win_disable(crtc, i); 1061 mixer_disable_plane(crtc, &ctx->planes[i]);
1072
1073 ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
1074 1062
1075 mutex_lock(&ctx->mixer_mutex); 1063 clear_bit(MXR_BIT_POWERED, &ctx->flags);
1076 ctx->powered = false;
1077 mutex_unlock(&ctx->mixer_mutex);
1078 1064
1079 clk_disable_unprepare(res->hdmi); 1065 clk_disable_unprepare(res->hdmi);
1080 clk_disable_unprepare(res->mixer); 1066 clk_disable_unprepare(res->mixer);
@@ -1113,8 +1099,8 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
1113 .enable_vblank = mixer_enable_vblank, 1099 .enable_vblank = mixer_enable_vblank,
1114 .disable_vblank = mixer_disable_vblank, 1100 .disable_vblank = mixer_disable_vblank,
1115 .wait_for_vblank = mixer_wait_for_vblank, 1101 .wait_for_vblank = mixer_wait_for_vblank,
1116 .win_commit = mixer_win_commit, 1102 .update_plane = mixer_update_plane,
1117 .win_disable = mixer_win_disable, 1103 .disable_plane = mixer_disable_plane,
1118}; 1104};
1119 1105
1120static struct mixer_drv_data exynos5420_mxr_drv_data = { 1106static struct mixer_drv_data exynos5420_mxr_drv_data = {
@@ -1236,8 +1222,6 @@ static int mixer_probe(struct platform_device *pdev)
1236 return -ENOMEM; 1222 return -ENOMEM;
1237 } 1223 }
1238 1224
1239 mutex_init(&ctx->mixer_mutex);
1240
1241 if (dev->of_node) { 1225 if (dev->of_node) {
1242 const struct of_device_id *match; 1226 const struct of_device_id *match;
1243 1227
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
new file mode 100644
index 000000000000..c78cf3f605d0
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -0,0 +1,18 @@
1config DRM_FSL_DCU
2 tristate "DRM Support for Freescale DCU"
3 depends on DRM && OF && ARM
4 select BACKLIGHT_CLASS_DEVICE
5 select BACKLIGHT_LCD_SUPPORT
6 select DRM_KMS_HELPER
7 select DRM_KMS_CMA_HELPER
8 select DRM_KMS_FB_HELPER
9 select DRM_PANEL
10 select FB_SYS_FILLRECT
11 select FB_SYS_COPYAREA
12 select FB_SYS_IMAGEBLIT
13 select FB_SYS_FOPS
14 select REGMAP_MMIO
15 select VIDEOMODE_HELPERS
16 help
17 Choose this option if you have a Freescale DCU chipset.
18 If M is selected the module will be called fsl-dcu-drm.
diff --git a/drivers/gpu/drm/fsl-dcu/Makefile b/drivers/gpu/drm/fsl-dcu/Makefile
new file mode 100644
index 000000000000..6ea1523ae6ec
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/Makefile
@@ -0,0 +1,7 @@
1fsl-dcu-drm-y := fsl_dcu_drm_drv.o \
2 fsl_dcu_drm_kms.o \
3 fsl_dcu_drm_rgb.o \
4 fsl_dcu_drm_plane.o \
5 fsl_dcu_drm_crtc.o \
6 fsl_dcu_drm_fbdev.o
7obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu-drm.o
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
new file mode 100644
index 000000000000..82a3d311e164
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -0,0 +1,210 @@
1/*
2 * Copyright 2015 Freescale Semiconductor, Inc.
3 *
4 * Freescale DCU drm device driver
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/clk.h>
13#include <linux/regmap.h>
14
15#include <drm/drmP.h>
16#include <drm/drm_atomic.h>
17#include <drm/drm_atomic_helper.h>
18#include <drm/drm_crtc.h>
19#include <drm/drm_crtc_helper.h>
20
21#include "fsl_dcu_drm_crtc.h"
22#include "fsl_dcu_drm_drv.h"
23#include "fsl_dcu_drm_plane.h"
24
25static void fsl_dcu_drm_crtc_atomic_begin(struct drm_crtc *crtc,
26 struct drm_crtc_state *old_crtc_state)
27{
28}
29
30static int fsl_dcu_drm_crtc_atomic_check(struct drm_crtc *crtc,
31 struct drm_crtc_state *state)
32{
33 return 0;
34}
35
36static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
37 struct drm_crtc_state *old_crtc_state)
38{
39}
40
41static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
42{
43 struct drm_device *dev = crtc->dev;
44 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
45 int ret;
46
47 ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
48 DCU_MODE_DCU_MODE_MASK,
49 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
50 if (ret)
51 dev_err(fsl_dev->dev, "Disable CRTC failed\n");
52 ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
53 DCU_UPDATE_MODE_READREG);
54 if (ret)
55 dev_err(fsl_dev->dev, "Disable CRTC failed\n");
56}
57
58static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
59{
60 struct drm_device *dev = crtc->dev;
61 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
62 int ret;
63
64 ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
65 DCU_MODE_DCU_MODE_MASK,
66 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
67 if (ret)
68 dev_err(fsl_dev->dev, "Enable CRTC failed\n");
69 ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
70 DCU_UPDATE_MODE_READREG);
71 if (ret)
72 dev_err(fsl_dev->dev, "Enable CRTC failed\n");
73}
74
75static bool fsl_dcu_drm_crtc_mode_fixup(struct drm_crtc *crtc,
76 const struct drm_display_mode *mode,
77 struct drm_display_mode *adjusted_mode)
78{
79 return true;
80}
81
82static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
83{
84 struct drm_device *dev = crtc->dev;
85 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
86 struct drm_display_mode *mode = &crtc->state->mode;
87 unsigned int hbp, hfp, hsw, vbp, vfp, vsw, div, index;
88 unsigned long dcuclk;
89 int ret;
90
91 index = drm_crtc_index(crtc);
92 dcuclk = clk_get_rate(fsl_dev->clk);
93 div = dcuclk / mode->clock / 1000;
94
95 /* Configure timings: */
96 hbp = mode->htotal - mode->hsync_end;
97 hfp = mode->hsync_start - mode->hdisplay;
98 hsw = mode->hsync_end - mode->hsync_start;
99 vbp = mode->vtotal - mode->vsync_end;
100 vfp = mode->vsync_start - mode->vdisplay;
101 vsw = mode->vsync_end - mode->vsync_start;
102
103 ret = regmap_write(fsl_dev->regmap, DCU_HSYN_PARA,
104 DCU_HSYN_PARA_BP(hbp) |
105 DCU_HSYN_PARA_PW(hsw) |
106 DCU_HSYN_PARA_FP(hfp));
107 if (ret)
108 goto set_failed;
109 ret = regmap_write(fsl_dev->regmap, DCU_VSYN_PARA,
110 DCU_VSYN_PARA_BP(vbp) |
111 DCU_VSYN_PARA_PW(vsw) |
112 DCU_VSYN_PARA_FP(vfp));
113 if (ret)
114 goto set_failed;
115 ret = regmap_write(fsl_dev->regmap, DCU_DISP_SIZE,
116 DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) |
117 DCU_DISP_SIZE_DELTA_X(mode->hdisplay));
118 if (ret)
119 goto set_failed;
120 ret = regmap_write(fsl_dev->regmap, DCU_DIV_RATIO, div);
121 if (ret)
122 goto set_failed;
123 ret = regmap_write(fsl_dev->regmap, DCU_SYN_POL,
124 DCU_SYN_POL_INV_VS_LOW | DCU_SYN_POL_INV_HS_LOW);
125 if (ret)
126 goto set_failed;
127 ret = regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) |
128 DCU_BGND_G(0) | DCU_BGND_B(0));
129 if (ret)
130 goto set_failed;
131 ret = regmap_write(fsl_dev->regmap, DCU_DCU_MODE,
132 DCU_MODE_BLEND_ITER(1) | DCU_MODE_RASTER_EN);
133 if (ret)
134 goto set_failed;
135 ret = regmap_write(fsl_dev->regmap, DCU_THRESHOLD,
136 DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
137 DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
138 DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
139 if (ret)
140 goto set_failed;
141 ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
142 DCU_UPDATE_MODE_READREG);
143 if (ret)
144 goto set_failed;
145 return;
146set_failed:
147 dev_err(dev->dev, "set DCU register failed\n");
148}
149
150static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
151 .atomic_begin = fsl_dcu_drm_crtc_atomic_begin,
152 .atomic_check = fsl_dcu_drm_crtc_atomic_check,
153 .atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
154 .disable = fsl_dcu_drm_disable_crtc,
155 .enable = fsl_dcu_drm_crtc_enable,
156 .mode_fixup = fsl_dcu_drm_crtc_mode_fixup,
157 .mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
158};
159
160static const struct drm_crtc_funcs fsl_dcu_drm_crtc_funcs = {
161 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
162 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
163 .destroy = drm_crtc_cleanup,
164 .page_flip = drm_atomic_helper_page_flip,
165 .reset = drm_atomic_helper_crtc_reset,
166 .set_config = drm_atomic_helper_set_config,
167};
168
169int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
170{
171 struct drm_plane *primary;
172 struct drm_crtc *crtc = &fsl_dev->crtc;
173 unsigned int i, j, reg_num;
174 int ret;
175
176 primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
177 ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL,
178 &fsl_dcu_drm_crtc_funcs);
179 if (ret < 0)
180 return ret;
181
182 drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs);
183
184 if (!strcmp(fsl_dev->soc->name, "ls1021a"))
185 reg_num = LS1021A_LAYER_REG_NUM;
186 else
187 reg_num = VF610_LAYER_REG_NUM;
188 for (i = 0; i <= fsl_dev->soc->total_layer; i++) {
189 for (j = 0; j < reg_num; j++) {
190 ret = regmap_write(fsl_dev->regmap,
191 DCU_CTRLDESCLN(i, j), 0);
192 if (ret)
193 goto init_failed;
194 }
195 }
196 ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
197 DCU_MODE_DCU_MODE_MASK,
198 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
199 if (ret)
200 goto init_failed;
201 ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
202 DCU_UPDATE_MODE_READREG);
203 if (ret)
204 goto init_failed;
205
206 return 0;
207init_failed:
208 dev_err(fsl_dev->dev, "init DCU register failed\n");
209 return ret;
210}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h
new file mode 100644
index 000000000000..43d4da2c5fe5
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h
@@ -0,0 +1,19 @@
1/*
2 * Copyright 2015 Freescale Semiconductor, Inc.
3 *
4 * Freescale DCU drm device driver
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __FSL_DCU_DRM_CRTC_H__
13#define __FSL_DCU_DRM_CRTC_H__
14
15struct fsl_dcu_drm_device;
16
17int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev);
18
19#endif /* __FSL_DCU_DRM_CRTC_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
new file mode 100644
index 000000000000..9a8e2da47158
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -0,0 +1,404 @@
1/*
2 * Copyright 2015 Freescale Semiconductor, Inc.
3 *
4 * Freescale DCU drm device driver
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/clk.h>
13#include <linux/clk-provider.h>
14#include <linux/io.h>
15#include <linux/mfd/syscon.h>
16#include <linux/mm.h>
17#include <linux/module.h>
18#include <linux/of_platform.h>
19#include <linux/platform_device.h>
20#include <linux/pm.h>
21#include <linux/pm_runtime.h>
22#include <linux/regmap.h>
23
24#include <drm/drmP.h>
25#include <drm/drm_crtc_helper.h>
26#include <drm/drm_gem_cma_helper.h>
27
28#include "fsl_dcu_drm_crtc.h"
29#include "fsl_dcu_drm_drv.h"
30
31static const struct regmap_config fsl_dcu_regmap_config = {
32 .reg_bits = 32,
33 .reg_stride = 4,
34 .val_bits = 32,
35 .cache_type = REGCACHE_RBTREE,
36};
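
This config describes the DCU's 32-bit, 4-byte-stride register file with a red-black-tree register cache. It is presumably consumed later in probe (outside this excerpt) roughly as follows; base and res are hypothetical locals:

base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
	return PTR_ERR(base);

fsl_dev->regmap = devm_regmap_init_mmio(dev, base,
					&fsl_dcu_regmap_config);
if (IS_ERR(fsl_dev->regmap)) {
	dev_err(dev, "regmap init failed\n");
	return PTR_ERR(fsl_dev->regmap);
}
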
37
38static int fsl_dcu_drm_irq_init(struct drm_device *dev)
39{
40 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
41 unsigned int value;
42 int ret;
43
44 ret = drm_irq_install(dev, fsl_dev->irq);
45 if (ret < 0)
46 dev_err(dev->dev, "failed to install IRQ handler\n");
47
48 ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0);
49 if (ret)
50 dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
51 ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
52 if (ret)
53 dev_err(dev->dev, "read DCU_INT_MASK failed\n");
54 value &= DCU_INT_MASK_VBLANK;
55 ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
56 if (ret)
57 dev_err(dev->dev, "set DCU_INT_MASK failed\n");
58 ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
59 DCU_UPDATE_MODE_READREG);
60 if (ret)
61 dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n");
62
63 return ret;
64}
65
66static int fsl_dcu_load(struct drm_device *drm, unsigned long flags)
67{
68 struct device *dev = drm->dev;
69 struct fsl_dcu_drm_device *fsl_dev = drm->dev_private;
70 int ret;
71
72 ret = fsl_dcu_drm_modeset_init(fsl_dev);
73 if (ret < 0) {
74 dev_err(dev, "failed to initialize mode setting\n");
75 return ret;
76 }
77
78 ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
79 if (ret < 0) {
80 dev_err(dev, "failed to initialize vblank\n");
81 goto done;
82 }
83 drm->vblank_disable_allowed = true;
84
85 ret = fsl_dcu_drm_irq_init(drm);
86 if (ret < 0)
87 goto done;
88 drm->irq_enabled = true;
89
90 fsl_dcu_fbdev_init(drm);
91
92 return 0;
93done:
94 if (ret) {
95 drm_mode_config_cleanup(drm);
96 drm_vblank_cleanup(drm);
97 drm_irq_uninstall(drm);
98 drm->dev_private = NULL;
99 }
100
101 return ret;
102}
103
104static int fsl_dcu_unload(struct drm_device *dev)
105{
106 drm_mode_config_cleanup(dev);
107 drm_vblank_cleanup(dev);
108 drm_irq_uninstall(dev);
109
110 dev->dev_private = NULL;
111
112 return 0;
113}
114
115static void fsl_dcu_drm_preclose(struct drm_device *dev, struct drm_file *file)
116{
117}
118
119static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
120{
121 struct drm_device *dev = arg;
122 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
123 unsigned int int_status;
124 int ret;
125
126 ret = regmap_read(fsl_dev->regmap, DCU_INT_STATUS, &int_status);
127 if (ret)
128		dev_err(dev->dev, "read DCU_INT_STATUS failed\n");
129 if (int_status & DCU_INT_STATUS_VBLANK)
130 drm_handle_vblank(dev, 0);
131
132 ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0xffffffff);
133 if (ret)
134 dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
135 ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
136 DCU_UPDATE_MODE_READREG);
137 if (ret)
138 dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n");
139
140 return IRQ_HANDLED;
141}
142
143static int fsl_dcu_drm_enable_vblank(struct drm_device *dev, int crtc)
144{
145 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
146 unsigned int value;
147 int ret;
148
149 ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
150 if (ret)
151 dev_err(dev->dev, "read DCU_INT_MASK failed\n");
152 value &= ~DCU_INT_MASK_VBLANK;
153 ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
154 if (ret)
155 dev_err(dev->dev, "set DCU_INT_MASK failed\n");
156 return 0;
157}
158
159static void fsl_dcu_drm_disable_vblank(struct drm_device *dev, int crtc)
160{
161 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
162 unsigned int value;
163 int ret;
164
165 ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
166 if (ret)
167 dev_err(dev->dev, "read DCU_INT_MASK failed\n");
168 value |= DCU_INT_MASK_VBLANK;
169 ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
170 if (ret)
171 dev_err(dev->dev, "set DCU_INT_MASK failed\n");
172}
173
174static const struct file_operations fsl_dcu_drm_fops = {
175 .owner = THIS_MODULE,
176 .open = drm_open,
177 .release = drm_release,
178 .unlocked_ioctl = drm_ioctl,
179#ifdef CONFIG_COMPAT
180 .compat_ioctl = drm_compat_ioctl,
181#endif
182 .poll = drm_poll,
183 .read = drm_read,
184 .llseek = no_llseek,
185 .mmap = drm_gem_cma_mmap,
186};
187
188static struct drm_driver fsl_dcu_drm_driver = {
189 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
190 | DRIVER_PRIME | DRIVER_ATOMIC,
191 .load = fsl_dcu_load,
192 .unload = fsl_dcu_unload,
193 .preclose = fsl_dcu_drm_preclose,
194 .irq_handler = fsl_dcu_drm_irq,
195 .get_vblank_counter = drm_vblank_count,
196 .enable_vblank = fsl_dcu_drm_enable_vblank,
197 .disable_vblank = fsl_dcu_drm_disable_vblank,
198 .gem_free_object = drm_gem_cma_free_object,
199 .gem_vm_ops = &drm_gem_cma_vm_ops,
200 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
201 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
202 .gem_prime_import = drm_gem_prime_import,
203 .gem_prime_export = drm_gem_prime_export,
204 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
205 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
206 .gem_prime_vmap = drm_gem_cma_prime_vmap,
207 .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
208 .gem_prime_mmap = drm_gem_cma_prime_mmap,
209 .dumb_create = drm_gem_cma_dumb_create,
210 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
211 .dumb_destroy = drm_gem_dumb_destroy,
212 .fops = &fsl_dcu_drm_fops,
213 .name = "fsl-dcu-drm",
214 .desc = "Freescale DCU DRM",
215 .date = "20150213",
216 .major = 1,
217 .minor = 0,
218};
219
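/*
 * Suspend assumes the DCU loses its register state: the regmap cache
 * is switched to cache-only mode and marked dirty, so regcache_sync()
 * on resume rewrites every cached register.
 */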
220#ifdef CONFIG_PM_SLEEP
221static int fsl_dcu_drm_pm_suspend(struct device *dev)
222{
223 struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);
224
225 if (!fsl_dev)
226 return 0;
227
228 drm_kms_helper_poll_disable(fsl_dev->drm);
229 regcache_cache_only(fsl_dev->regmap, true);
230 regcache_mark_dirty(fsl_dev->regmap);
231 clk_disable(fsl_dev->clk);
232 clk_unprepare(fsl_dev->clk);
233
234 return 0;
235}
236
237static int fsl_dcu_drm_pm_resume(struct device *dev)
238{
239 struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);
240 int ret;
241
242 if (!fsl_dev)
243 return 0;
244
245	ret = clk_prepare(fsl_dev->clk);
246	if (ret < 0) {
247		dev_err(dev, "failed to prepare dcu clk\n");
248		return ret;
249	}
250	ret = clk_enable(fsl_dev->clk);
251	if (ret < 0) {
252		dev_err(dev, "failed to enable dcu clk\n");
253		clk_unprepare(fsl_dev->clk);
254		return ret;
255	}
256
257 drm_kms_helper_poll_enable(fsl_dev->drm);
258 regcache_cache_only(fsl_dev->regmap, false);
259 regcache_sync(fsl_dev->regmap);
260
261 return 0;
262}
263#endif
264
265static const struct dev_pm_ops fsl_dcu_drm_pm_ops = {
266 SET_SYSTEM_SLEEP_PM_OPS(fsl_dcu_drm_pm_suspend, fsl_dcu_drm_pm_resume)
267};
268
269static const struct fsl_dcu_soc_data fsl_dcu_ls1021a_data = {
270 .name = "ls1021a",
271 .total_layer = 16,
272 .max_layer = 4,
273};
274
275static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = {
276 .name = "vf610",
277 .total_layer = 64,
278 .max_layer = 6,
279};
280
281static const struct of_device_id fsl_dcu_of_match[] = {
282 {
283 .compatible = "fsl,ls1021a-dcu",
284 .data = &fsl_dcu_ls1021a_data,
285 }, {
286 .compatible = "fsl,vf610-dcu",
287 .data = &fsl_dcu_vf610_data,
288 }, {
289 },
290};
291MODULE_DEVICE_TABLE(of, fsl_dcu_of_match);
292
293static int fsl_dcu_drm_probe(struct platform_device *pdev)
294{
295 struct fsl_dcu_drm_device *fsl_dev;
296 struct drm_device *drm;
297 struct device *dev = &pdev->dev;
298 struct resource *res;
299 void __iomem *base;
300 struct drm_driver *driver = &fsl_dcu_drm_driver;
301 const struct of_device_id *id;
302 int ret;
303
304 fsl_dev = devm_kzalloc(dev, sizeof(*fsl_dev), GFP_KERNEL);
305 if (!fsl_dev)
306 return -ENOMEM;
307
308 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
309 if (!res) {
310 dev_err(dev, "could not get memory IO resource\n");
311 return -ENODEV;
312 }
313
314 base = devm_ioremap_resource(dev, res);
315 if (IS_ERR(base)) {
316		return PTR_ERR(base);
318 }
319
320 fsl_dev->irq = platform_get_irq(pdev, 0);
321 if (fsl_dev->irq < 0) {
322 dev_err(dev, "failed to get irq\n");
323 return -ENXIO;
324 }
325
326 fsl_dev->clk = devm_clk_get(dev, "dcu");
327 if (IS_ERR(fsl_dev->clk)) {
328 ret = PTR_ERR(fsl_dev->clk);
329 dev_err(dev, "failed to get dcu clock\n");
330 return ret;
331 }
332 ret = clk_prepare(fsl_dev->clk);
333 if (ret < 0) {
334 dev_err(dev, "failed to prepare dcu clk\n");
335 return ret;
336 }
337 ret = clk_enable(fsl_dev->clk);
338 if (ret < 0) {
339 dev_err(dev, "failed to enable dcu clk\n");
340 clk_unprepare(fsl_dev->clk);
341 return ret;
342 }
343
344 fsl_dev->regmap = devm_regmap_init_mmio(dev, base,
345 &fsl_dcu_regmap_config);
346 if (IS_ERR(fsl_dev->regmap)) {
347 dev_err(dev, "regmap init failed\n");
348		ret = PTR_ERR(fsl_dev->regmap);
		goto disable_clk;
349 }
350
351 id = of_match_node(fsl_dcu_of_match, pdev->dev.of_node);
352	if (!id) {
353		ret = -ENODEV;
		goto disable_clk;
	}
354 fsl_dev->soc = id->data;
355
356 drm = drm_dev_alloc(driver, dev);
357	if (!drm) {
358		ret = -ENOMEM;
		goto disable_clk;
	}
359
360 fsl_dev->dev = dev;
361 fsl_dev->drm = drm;
362 fsl_dev->np = dev->of_node;
363 drm->dev_private = fsl_dev;
364 dev_set_drvdata(dev, fsl_dev);
365 drm_dev_set_unique(drm, dev_name(dev));
366
367 ret = drm_dev_register(drm, 0);
368 if (ret < 0)
369 goto unref;
370
371 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
372 driver->major, driver->minor, driver->patchlevel,
373 driver->date, drm->primary->index);
374
375 return 0;
376
377unref:
378	drm_dev_unref(drm);
disable_clk:
	clk_disable_unprepare(fsl_dev->clk);
379	return ret;
380}
381
382static int fsl_dcu_drm_remove(struct platform_device *pdev)
383{
384 struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
385
386 drm_put_dev(fsl_dev->drm);
387
388 return 0;
389}
390
391static struct platform_driver fsl_dcu_drm_platform_driver = {
392 .probe = fsl_dcu_drm_probe,
393 .remove = fsl_dcu_drm_remove,
394 .driver = {
395 .name = "fsl-dcu",
396 .pm = &fsl_dcu_drm_pm_ops,
397 .of_match_table = fsl_dcu_of_match,
398 },
399};
400
401module_platform_driver(fsl_dcu_drm_platform_driver);
402
403MODULE_DESCRIPTION("Freescale DCU DRM Driver");
404MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
new file mode 100644
index 000000000000..579b9e44e764
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -0,0 +1,197 @@
1/*
2 * Copyright 2015 Freescale Semiconductor, Inc.
3 *
4 * Freescale DCU drm device driver
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __FSL_DCU_DRM_DRV_H__
13#define __FSL_DCU_DRM_DRV_H__
14
15#include "fsl_dcu_drm_crtc.h"
16#include "fsl_dcu_drm_output.h"
17#include "fsl_dcu_drm_plane.h"
18
19#define DCU_DCU_MODE 0x0010
20#define DCU_MODE_BLEND_ITER(x) ((x) << 20)
21#define DCU_MODE_RASTER_EN BIT(14)
22#define DCU_MODE_DCU_MODE(x) (x)
23#define DCU_MODE_DCU_MODE_MASK 0x03
24#define DCU_MODE_OFF 0
25#define DCU_MODE_NORMAL 1
26#define DCU_MODE_TEST 2
27#define DCU_MODE_COLORBAR 3
28
29#define DCU_BGND 0x0014
30#define DCU_BGND_R(x) ((x) << 16)
31#define DCU_BGND_G(x) ((x) << 8)
32#define DCU_BGND_B(x) (x)
33
34#define DCU_DISP_SIZE 0x0018
35#define DCU_DISP_SIZE_DELTA_Y(x) ((x) << 16)
36/* register value is 1/16 of the horizontal resolution */
37#define DCU_DISP_SIZE_DELTA_X(x) ((x) >> 4)
38
39#define DCU_HSYN_PARA 0x001c
40#define DCU_HSYN_PARA_BP(x) ((x) << 22)
41#define DCU_HSYN_PARA_PW(x) ((x) << 11)
42#define DCU_HSYN_PARA_FP(x) (x)
43
44#define DCU_VSYN_PARA 0x0020
45#define DCU_VSYN_PARA_BP(x) ((x) << 22)
46#define DCU_VSYN_PARA_PW(x) ((x) << 11)
47#define DCU_VSYN_PARA_FP(x) (x)
48
49#define DCU_SYN_POL 0x0024
50#define DCU_SYN_POL_INV_PXCK_FALL (0 << 6)
51#define DCU_SYN_POL_NEG_REMAIN (0 << 5)
52#define DCU_SYN_POL_INV_VS_LOW BIT(1)
53#define DCU_SYN_POL_INV_HS_LOW BIT(0)
54
55#define DCU_THRESHOLD 0x0028
56#define DCU_THRESHOLD_LS_BF_VS(x) ((x) << 16)
57#define DCU_THRESHOLD_OUT_BUF_HIGH(x) ((x) << 8)
58#define DCU_THRESHOLD_OUT_BUF_LOW(x) (x)
59#define BF_VS_VAL 0x03
60#define BUF_MAX_VAL 0x78
61#define BUF_MIN_VAL 0x0a
62
63#define DCU_INT_STATUS 0x002C
64#define DCU_INT_STATUS_VSYNC BIT(0)
65#define DCU_INT_STATUS_UNDRUN BIT(1)
66#define DCU_INT_STATUS_LSBFVS BIT(2)
67#define DCU_INT_STATUS_VBLANK BIT(3)
68#define DCU_INT_STATUS_CRCREADY BIT(4)
69#define DCU_INT_STATUS_CRCOVERFLOW BIT(5)
70#define DCU_INT_STATUS_P1FIFOLO BIT(6)
71#define DCU_INT_STATUS_P1FIFOHI BIT(7)
72#define DCU_INT_STATUS_P2FIFOLO BIT(8)
73#define DCU_INT_STATUS_P2FIFOHI BIT(9)
74#define DCU_INT_STATUS_PROGEND BIT(10)
75#define DCU_INT_STATUS_IPMERROR BIT(11)
76#define DCU_INT_STATUS_LYRTRANS BIT(12)
77#define DCU_INT_STATUS_DMATRANS BIT(14)
78#define DCU_INT_STATUS_P3FIFOLO BIT(16)
79#define DCU_INT_STATUS_P3FIFOHI BIT(17)
80#define DCU_INT_STATUS_P4FIFOLO BIT(18)
81#define DCU_INT_STATUS_P4FIFOHI BIT(19)
82#define DCU_INT_STATUS_P1EMPTY BIT(26)
83#define DCU_INT_STATUS_P2EMPTY BIT(27)
84#define DCU_INT_STATUS_P3EMPTY BIT(28)
85#define DCU_INT_STATUS_P4EMPTY BIT(29)
86
87#define DCU_INT_MASK 0x0030
88#define DCU_INT_MASK_VSYNC BIT(0)
89#define DCU_INT_MASK_UNDRUN BIT(1)
90#define DCU_INT_MASK_LSBFVS BIT(2)
91#define DCU_INT_MASK_VBLANK BIT(3)
92#define DCU_INT_MASK_CRCREADY BIT(4)
93#define DCU_INT_MASK_CRCOVERFLOW BIT(5)
94#define DCU_INT_MASK_P1FIFOLO BIT(6)
95#define DCU_INT_MASK_P1FIFOHI BIT(7)
96#define DCU_INT_MASK_P2FIFOLO BIT(8)
97#define DCU_INT_MASK_P2FIFOHI BIT(9)
98#define DCU_INT_MASK_PROGEND BIT(10)
99#define DCU_INT_MASK_IPMERROR BIT(11)
100#define DCU_INT_MASK_LYRTRANS BIT(12)
101#define DCU_INT_MASK_DMATRANS BIT(14)
102#define DCU_INT_MASK_P3FIFOLO BIT(16)
103#define DCU_INT_MASK_P3FIFOHI BIT(17)
104#define DCU_INT_MASK_P4FIFOLO BIT(18)
105#define DCU_INT_MASK_P4FIFOHI BIT(19)
106#define DCU_INT_MASK_P1EMPTY BIT(26)
107#define DCU_INT_MASK_P2EMPTY BIT(27)
108#define DCU_INT_MASK_P3EMPTY BIT(28)
109#define DCU_INT_MASK_P4EMPTY BIT(29)
110
111#define DCU_DIV_RATIO 0x0054
112
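/*
 * DCU registers are shadowed: writes only reach the active set once
 * DCU_UPDATE_MODE_READREG is set, which is why the driver follows
 * every programming sequence with a DCU_UPDATE_MODE write.
 */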
113#define DCU_UPDATE_MODE 0x00cc
114#define DCU_UPDATE_MODE_MODE BIT(31)
115#define DCU_UPDATE_MODE_READREG BIT(30)
116
117#define DCU_DCFB_MAX 0x300
118
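/*
 * Layer control descriptors start at 0x200, one 0x40-byte window per
 * layer; registers within a window count from 1, hence the "reg - 1".
 */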
119#define DCU_CTRLDESCLN(layer, reg) (0x200 + (reg - 1) * 4 + (layer) * 0x40)
120
121#define DCU_LAYER_HEIGHT(x) ((x) << 16)
122#define DCU_LAYER_WIDTH(x) (x)
123
124#define DCU_LAYER_POSY(x) ((x) << 16)
125#define DCU_LAYER_POSX(x) (x)
126
127#define DCU_LAYER_EN BIT(31)
128#define DCU_LAYER_TILE_EN BIT(30)
129#define DCU_LAYER_DATA_SEL_CLUT BIT(29)
130#define DCU_LAYER_SAFETY_EN BIT(28)
131#define DCU_LAYER_TRANS(x) ((x) << 20)
132#define DCU_LAYER_BPP(x) ((x) << 16)
133#define DCU_LAYER_RLE_EN BIT(15)
134#define DCU_LAYER_LUOFFS(x) ((x) << 4)
135#define DCU_LAYER_BB_ON BIT(2)
136#define DCU_LAYER_AB(x) (x)
137
138#define DCU_LAYER_CKMAX_R(x) ((x) << 16)
139#define DCU_LAYER_CKMAX_G(x) ((x) << 8)
140#define DCU_LAYER_CKMAX_B(x) (x)
141
142#define DCU_LAYER_CKMIN_R(x) ((x) << 16)
143#define DCU_LAYER_CKMIN_G(x) ((x) << 8)
144#define DCU_LAYER_CKMIN_B(x) (x)
145
146#define DCU_LAYER_TILE_VER(x) ((x) << 16)
147#define DCU_LAYER_TILE_HOR(x) (x)
148
149#define DCU_LAYER_FG_FCOLOR(x) (x)
150
151#define DCU_LAYER_BG_BCOLOR(x) (x)
152
153#define DCU_LAYER_POST_SKIP(x) ((x) << 16)
154#define DCU_LAYER_PRE_SKIP(x) (x)
155
156#define FSL_DCU_RGB565 4
157#define FSL_DCU_RGB888 5
158#define FSL_DCU_ARGB8888 6
159#define FSL_DCU_ARGB1555 11
160#define FSL_DCU_ARGB4444 12
161#define FSL_DCU_YUV422 14
162
163#define VF610_LAYER_REG_NUM 9
164#define LS1021A_LAYER_REG_NUM 10
165
166struct clk;
167struct device;
168struct drm_device;
169
170struct fsl_dcu_soc_data {
171 const char *name;
172	/* total number of hardware layers */
173	unsigned int total_layer;
174	/* maximum number of layers the DCU supports */
175 unsigned int max_layer;
176};
177
178struct fsl_dcu_drm_device {
179 struct device *dev;
180 struct device_node *np;
181 struct regmap *regmap;
182 int irq;
183 struct clk *clk;
184	/* protects hardware registers */
185 spinlock_t irq_lock;
186 struct drm_device *drm;
187 struct drm_fbdev_cma *fbdev;
188 struct drm_crtc crtc;
189 struct drm_encoder encoder;
190 struct fsl_dcu_drm_connector connector;
191 const struct fsl_dcu_soc_data *soc;
192};
193
194void fsl_dcu_fbdev_init(struct drm_device *dev);
195int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev);
196
197#endif /* __FSL_DCU_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c
new file mode 100644
index 000000000000..8b8b819ea704
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c
@@ -0,0 +1,23 @@
1/*
2 * Copyright 2015 Freescale Semiconductor, Inc.
3 *
4 * Freescale DCU drm device driver
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <drm/drmP.h>
13#include <drm/drm_fb_cma_helper.h>
14
15#include "fsl_dcu_drm_drv.h"
16
17/* initialize fbdev helper */
18void fsl_dcu_fbdev_init(struct drm_device *dev)
19{
20 struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev->dev);
21
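	/* 24bpp console on the single CRTC/connector pair the driver exposes */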
22 fsl_dev->fbdev = drm_fbdev_cma_init(dev, 24, 1, 1);
23}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
new file mode 100644
index 000000000000..0ef5959710e7
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -0,0 +1,43 @@
1/*
2 * Copyright 2015 Freescale Semiconductor, Inc.
3 *
4 * Freescale DCU drm device driver
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <drm/drmP.h>
13#include <drm/drm_atomic_helper.h>
14#include <drm/drm_crtc_helper.h>
15#include <drm/drm_fb_cma_helper.h>
16
17#include "fsl_dcu_drm_crtc.h"
18#include "fsl_dcu_drm_drv.h"
19
20static const struct drm_mode_config_funcs fsl_dcu_drm_mode_config_funcs = {
21 .atomic_check = drm_atomic_helper_check,
22 .atomic_commit = drm_atomic_helper_commit,
23 .fb_create = drm_fb_cma_create,
24};
25
26int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
27{
	int ret;

28 drm_mode_config_init(fsl_dev->drm);
29
30 fsl_dev->drm->mode_config.min_width = 0;
31 fsl_dev->drm->mode_config.min_height = 0;
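	/*
	 * Maxima presumably follow from the DCU_DISP_SIZE register fields
	 * (DELTA_X is stored as 1/16th of the horizontal resolution).
	 */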
32 fsl_dev->drm->mode_config.max_width = 2031;
33 fsl_dev->drm->mode_config.max_height = 2047;
34 fsl_dev->drm->mode_config.funcs = &fsl_dcu_drm_mode_config_funcs;
35
36 drm_kms_helper_poll_init(fsl_dev->drm);
37	ret = fsl_dcu_drm_crtc_create(fsl_dev);
	if (ret)
		return ret;
38	ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
	if (ret)
		return ret;
39	ret = fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder);
	if (ret)
		return ret;
40 drm_mode_config_reset(fsl_dev->drm);
41
42 return 0;
43}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
new file mode 100644
index 000000000000..7093109fbc21
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright 2015 Freescale Semiconductor, Inc.
3 *
4 * Freescale DCU drm device driver
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __FSL_DCU_DRM_CONNECTOR_H__
13#define __FSL_DCU_DRM_CONNECTOR_H__
14
15struct fsl_dcu_drm_connector {
16 struct drm_connector base;
17 struct drm_encoder *encoder;
18 struct drm_panel *panel;
19};
20
21static inline struct fsl_dcu_drm_connector *
22to_fsl_dcu_connector(struct drm_connector *con)
23{
24 return con ? container_of(con, struct fsl_dcu_drm_connector, base)
25 : NULL;
26}
27
28int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
29 struct drm_encoder *encoder);
30int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
31 struct drm_crtc *crtc);
32
33#endif /* __FSL_DCU_DRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
new file mode 100644
index 000000000000..82be6b86a168
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -0,0 +1,261 @@
1/*
2 * Copyright 2015 Freescale Semiconductor, Inc.
3 *
4 * Freescale DCU drm device driver
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/regmap.h>
13
14#include <drm/drmP.h>
15#include <drm/drm_atomic_helper.h>
16#include <drm/drm_crtc.h>
17#include <drm/drm_crtc_helper.h>
18#include <drm/drm_fb_cma_helper.h>
19#include <drm/drm_gem_cma_helper.h>
20#include <drm/drm_plane_helper.h>
21
22#include "fsl_dcu_drm_drv.h"
23#include "fsl_dcu_drm_plane.h"
24
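/*
 * Map a DRM plane index onto a DCU layer. Layers are handed out in
 * reverse order which, assuming lower-numbered layers blend on top,
 * keeps the primary plane at the bottom of the stack.
 */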
25static int fsl_dcu_drm_plane_index(struct drm_plane *plane)
26{
27 struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
28 unsigned int total_layer = fsl_dev->soc->total_layer;
29 unsigned int index;
30
31 index = drm_plane_index(plane);
32 if (index < total_layer)
33 return total_layer - index - 1;
34
35	dev_err(fsl_dev->dev, "no more layers left\n");
36 return -EINVAL;
37}
38
39static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane,
40 struct drm_plane_state *state)
41{
42	struct drm_framebuffer *fb = state->fb;
43
	/* nothing to check when the plane is being disabled */
	if (!fb)
		return 0;

44	switch (fb->pixel_format) {
45 case DRM_FORMAT_RGB565:
46 case DRM_FORMAT_RGB888:
47 case DRM_FORMAT_ARGB8888:
48	case DRM_FORMAT_ARGB4444:
49 case DRM_FORMAT_ARGB1555:
50 case DRM_FORMAT_YUV422:
51 return 0;
52 default:
53 return -EINVAL;
54 }
55}
56
57static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
58 struct drm_plane_state *old_state)
59{
60 struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
61	unsigned int value;
	int index, ret;
62
63 index = fsl_dcu_drm_plane_index(plane);
64 if (index < 0)
65 return;
66
67 ret = regmap_read(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), &value);
68 if (ret)
69		dev_err(fsl_dev->dev, "read DCU_CTRLDESCLN failed\n");
70 value &= ~DCU_LAYER_EN;
71 ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), value);
72 if (ret)
73 dev_err(fsl_dev->dev, "set DCU register failed\n");
74}
75
76static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
77 struct drm_plane_state *old_state)
78
79{
80 struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
81 struct drm_plane_state *state = plane->state;
82 struct drm_framebuffer *fb = plane->state->fb;
83 struct drm_gem_cma_object *gem;
84 unsigned int alpha, bpp;
85 int index, ret;
86
87 if (!fb)
88 return;
89
90 index = fsl_dcu_drm_plane_index(plane);
91 if (index < 0)
92 return;
93
94 gem = drm_fb_cma_get_gem_obj(fb, 0);
95
96 switch (fb->pixel_format) {
97 case DRM_FORMAT_RGB565:
98 bpp = FSL_DCU_RGB565;
99 alpha = 0xff;
100 break;
101 case DRM_FORMAT_RGB888:
102 bpp = FSL_DCU_RGB888;
103 alpha = 0xff;
104 break;
105 case DRM_FORMAT_ARGB8888:
106 bpp = FSL_DCU_ARGB8888;
107 alpha = 0xff;
108 break;
109	case DRM_FORMAT_ARGB4444:
110 bpp = FSL_DCU_ARGB4444;
111 alpha = 0xff;
112 break;
113 case DRM_FORMAT_ARGB1555:
114 bpp = FSL_DCU_ARGB1555;
115 alpha = 0xff;
116 break;
117 case DRM_FORMAT_YUV422:
118 bpp = FSL_DCU_YUV422;
119 alpha = 0xff;
120 break;
121 default:
122 return;
123 }
124
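	/*
	 * Program the layer's control descriptor: size, position, buffer
	 * address, blending controls, chroma-key range, fg/bg colors and,
	 * on ls1021a, the pre/post skip counts.
	 */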
125 ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1),
126 DCU_LAYER_HEIGHT(state->crtc_h) |
127 DCU_LAYER_WIDTH(state->crtc_w));
128 if (ret)
129 goto set_failed;
130 ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2),
131 DCU_LAYER_POSY(state->crtc_y) |
132 DCU_LAYER_POSX(state->crtc_x));
133 if (ret)
134 goto set_failed;
135 ret = regmap_write(fsl_dev->regmap,
136 DCU_CTRLDESCLN(index, 3), gem->paddr);
137 if (ret)
138 goto set_failed;
139 ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4),
140 DCU_LAYER_EN |
141 DCU_LAYER_TRANS(alpha) |
142 DCU_LAYER_BPP(bpp) |
143 DCU_LAYER_AB(0));
144 if (ret)
145 goto set_failed;
146 ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 5),
147 DCU_LAYER_CKMAX_R(0xFF) |
148 DCU_LAYER_CKMAX_G(0xFF) |
149 DCU_LAYER_CKMAX_B(0xFF));
150 if (ret)
151 goto set_failed;
152 ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 6),
153 DCU_LAYER_CKMIN_R(0) |
154 DCU_LAYER_CKMIN_G(0) |
155 DCU_LAYER_CKMIN_B(0));
156 if (ret)
157 goto set_failed;
158 ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 7), 0);
159 if (ret)
160 goto set_failed;
161 ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 8),
162 DCU_LAYER_FG_FCOLOR(0));
163 if (ret)
164 goto set_failed;
165 ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 9),
166 DCU_LAYER_BG_BCOLOR(0));
167 if (ret)
168 goto set_failed;
169 if (!strcmp(fsl_dev->soc->name, "ls1021a")) {
170 ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 10),
171 DCU_LAYER_POST_SKIP(0) |
172 DCU_LAYER_PRE_SKIP(0));
173 if (ret)
174 goto set_failed;
175 }
176 ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
177 DCU_MODE_DCU_MODE_MASK,
178 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
179 if (ret)
180 goto set_failed;
181 ret = regmap_write(fsl_dev->regmap,
182 DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
183 if (ret)
184 goto set_failed;
185 return;
186
187set_failed:
188 dev_err(fsl_dev->dev, "set DCU register failed\n");
189}
190
191static void
192fsl_dcu_drm_plane_cleanup_fb(struct drm_plane *plane,
193 struct drm_framebuffer *fb,
194 const struct drm_plane_state *new_state)
195{
196}
197
198static int
199fsl_dcu_drm_plane_prepare_fb(struct drm_plane *plane,
200 struct drm_framebuffer *fb,
201 const struct drm_plane_state *new_state)
202{
203 return 0;
204}
205
206static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = {
207 .atomic_check = fsl_dcu_drm_plane_atomic_check,
208 .atomic_disable = fsl_dcu_drm_plane_atomic_disable,
209 .atomic_update = fsl_dcu_drm_plane_atomic_update,
210 .cleanup_fb = fsl_dcu_drm_plane_cleanup_fb,
211 .prepare_fb = fsl_dcu_drm_plane_prepare_fb,
212};
213
214static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane)
215{
216 drm_plane_cleanup(plane);
217}
218
219static const struct drm_plane_funcs fsl_dcu_drm_plane_funcs = {
220 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
221 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
222 .destroy = fsl_dcu_drm_plane_destroy,
223 .disable_plane = drm_atomic_helper_disable_plane,
224 .reset = drm_atomic_helper_plane_reset,
225 .update_plane = drm_atomic_helper_update_plane,
226};
227
228static const u32 fsl_dcu_drm_plane_formats[] = {
229 DRM_FORMAT_RGB565,
230 DRM_FORMAT_RGB888,
231 DRM_FORMAT_ARGB8888,
232 DRM_FORMAT_ARGB4444,
233 DRM_FORMAT_ARGB1555,
234 DRM_FORMAT_YUV422,
235};
236
237struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
238{
239 struct drm_plane *primary;
240 int ret;
241
242 primary = kzalloc(sizeof(*primary), GFP_KERNEL);
243 if (!primary) {
244 DRM_DEBUG_KMS("Failed to allocate primary plane\n");
245 return NULL;
246 }
247
248	/* possible_crtcs will be filled in later by crtc_init */
249 ret = drm_universal_plane_init(dev, primary, 0,
250 &fsl_dcu_drm_plane_funcs,
251 fsl_dcu_drm_plane_formats,
252 ARRAY_SIZE(fsl_dcu_drm_plane_formats),
253 DRM_PLANE_TYPE_PRIMARY);
254 if (ret) {
255 kfree(primary);
256		return NULL;
257 }
258 drm_plane_helper_add(primary, &fsl_dcu_drm_plane_helper_funcs);
259
260 return primary;
261}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
new file mode 100644
index 000000000000..d657f088d859
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
@@ -0,0 +1,17 @@
1/*
2 * Copyright 2015 Freescale Semiconductor, Inc.
3 *
4 * Freescale DCU drm device driver
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __FSL_DCU_DRM_PLANE_H__
13#define __FSL_DCU_DRM_PLANE_H__
14
15struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev);
16
17#endif /* __FSL_DCU_DRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
new file mode 100644
index 000000000000..fe8ab5da04fb
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -0,0 +1,182 @@
1/*
2 * Copyright 2015 Freescale Semiconductor, Inc.
3 *
4 * Freescale DCU drm device driver
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/backlight.h>
13
14#include <drm/drmP.h>
15#include <drm/drm_atomic_helper.h>
16#include <drm/drm_crtc_helper.h>
17#include <drm/drm_panel.h>
18
19#include "fsl_dcu_drm_drv.h"
20
21static int
22fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
23 struct drm_crtc_state *crtc_state,
24 struct drm_connector_state *conn_state)
25{
26 return 0;
27}
28
29static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
30{
31}
32
33static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
34{
35}
36
37static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
38 .atomic_check = fsl_dcu_drm_encoder_atomic_check,
39 .disable = fsl_dcu_drm_encoder_disable,
40 .enable = fsl_dcu_drm_encoder_enable,
41};
42
43static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
44{
45 drm_encoder_cleanup(encoder);
46}
47
48static const struct drm_encoder_funcs encoder_funcs = {
49 .destroy = fsl_dcu_drm_encoder_destroy,
50};
51
52int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
53 struct drm_crtc *crtc)
54{
55 struct drm_encoder *encoder = &fsl_dev->encoder;
56 int ret;
57
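	/* the driver registers exactly one CRTC, so only bit 0 can be set */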
58 encoder->possible_crtcs = 1;
59 ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
60 DRM_MODE_ENCODER_LVDS);
61 if (ret < 0)
62 return ret;
63
64 drm_encoder_helper_add(encoder, &encoder_helper_funcs);
65
66 return 0;
67}
68
69static void fsl_dcu_drm_connector_destroy(struct drm_connector *connector)
70{
71 drm_connector_unregister(connector);
72 drm_connector_cleanup(connector);
73}
74
75static enum drm_connector_status
76fsl_dcu_drm_connector_detect(struct drm_connector *connector, bool force)
77{
78 return connector_status_connected;
79}
80
81static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
82 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
83 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
84 .destroy = fsl_dcu_drm_connector_destroy,
85 .detect = fsl_dcu_drm_connector_detect,
86 .dpms = drm_atomic_helper_connector_dpms,
87 .fill_modes = drm_helper_probe_single_connector_modes,
88 .reset = drm_atomic_helper_connector_reset,
89};
90
91static struct drm_encoder *
92fsl_dcu_drm_connector_best_encoder(struct drm_connector *connector)
93{
94 struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector);
95
96 return fsl_con->encoder;
97}
98
99static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
100{
101 struct fsl_dcu_drm_connector *fsl_connector;
102 int (*get_modes)(struct drm_panel *panel);
103 int num_modes = 0;
104
105 fsl_connector = to_fsl_dcu_connector(connector);
106 if (fsl_connector->panel && fsl_connector->panel->funcs &&
107 fsl_connector->panel->funcs->get_modes) {
108 get_modes = fsl_connector->panel->funcs->get_modes;
109 num_modes = get_modes(fsl_connector->panel);
110 }
111
112 return num_modes;
113}
114
115static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
116 struct drm_display_mode *mode)
117{
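	/*
	 * DCU_DISP_SIZE stores the width as 1/16th of the resolution, so
	 * only widths that are a multiple of 16 can be programmed.
	 */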
118 if (mode->hdisplay & 0xf)
119 return MODE_ERROR;
120
121 return MODE_OK;
122}
123
124static const struct drm_connector_helper_funcs connector_helper_funcs = {
125 .best_encoder = fsl_dcu_drm_connector_best_encoder,
126 .get_modes = fsl_dcu_drm_connector_get_modes,
127 .mode_valid = fsl_dcu_drm_connector_mode_valid,
128};
129
130int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
131 struct drm_encoder *encoder)
132{
133 struct drm_connector *connector = &fsl_dev->connector.base;
134	struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
135 struct device_node *panel_node;
136 int ret;
137
138 fsl_dev->connector.encoder = encoder;
139
140 ret = drm_connector_init(fsl_dev->drm, connector,
141 &fsl_dcu_drm_connector_funcs,
142 DRM_MODE_CONNECTOR_LVDS);
143 if (ret < 0)
144 return ret;
145
146 drm_connector_helper_add(connector, &connector_helper_funcs);
147 ret = drm_connector_register(connector);
148 if (ret < 0)
149 goto err_cleanup;
150
151 ret = drm_mode_connector_attach_encoder(connector, encoder);
152 if (ret < 0)
153 goto err_sysfs;
154
155 drm_object_property_set_value(&connector->base,
156				      mode_config->dpms_property,
157 DRM_MODE_DPMS_OFF);
158
159 panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
160 if (panel_node) {
161		fsl_dev->connector.panel = of_drm_find_panel(panel_node);
162		of_node_put(panel_node);
163		if (!fsl_dev->connector.panel) {
164			ret = -EPROBE_DEFER;
165			goto err_sysfs;
166		}
167 }
168
169 ret = drm_panel_attach(fsl_dev->connector.panel, connector);
170 if (ret) {
171 dev_err(fsl_dev->dev, "failed to attach panel\n");
172 goto err_sysfs;
173 }
174
175 return 0;
176
177err_sysfs:
178 drm_connector_unregister(connector);
179err_cleanup:
180 drm_connector_cleanup(connector);
181 return ret;
182}
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index de6f62a6ceb7..db9f7d011832 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -276,12 +276,12 @@ static void psbfb_copyarea_accel(struct fb_info *info,
276 break; 276 break;
277 default: 277 default:
278 /* software fallback */ 278 /* software fallback */
279 cfb_copyarea(info, a); 279 drm_fb_helper_cfb_copyarea(info, a);
280 return; 280 return;
281 } 281 }
282 282
283 if (!gma_power_begin(dev, false)) { 283 if (!gma_power_begin(dev, false)) {
284 cfb_copyarea(info, a); 284 drm_fb_helper_cfb_copyarea(info, a);
285 return; 285 return;
286 } 286 }
287 psb_accel_2d_copy(dev_priv, 287 psb_accel_2d_copy(dev_priv,
@@ -308,7 +308,7 @@ void psbfb_copyarea(struct fb_info *info,
308 /* Avoid the 8 pixel erratum */ 308 /* Avoid the 8 pixel erratum */
309 if (region->width == 8 || region->height == 8 || 309 if (region->width == 8 || region->height == 8 ||
310 (info->flags & FBINFO_HWACCEL_DISABLED)) 310 (info->flags & FBINFO_HWACCEL_DISABLED))
311 return cfb_copyarea(info, region); 311 return drm_fb_helper_cfb_copyarea(info, region);
312 312
313 psbfb_copyarea_accel(info, region); 313 psbfb_copyarea_accel(info, region);
314} 314}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 2d42ce6d3757..2eaf1b31c7bd 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -194,9 +194,9 @@ static struct fb_ops psbfb_ops = {
194 .fb_set_par = drm_fb_helper_set_par, 194 .fb_set_par = drm_fb_helper_set_par,
195 .fb_blank = drm_fb_helper_blank, 195 .fb_blank = drm_fb_helper_blank,
196 .fb_setcolreg = psbfb_setcolreg, 196 .fb_setcolreg = psbfb_setcolreg,
197 .fb_fillrect = cfb_fillrect, 197 .fb_fillrect = drm_fb_helper_cfb_fillrect,
198 .fb_copyarea = psbfb_copyarea, 198 .fb_copyarea = psbfb_copyarea,
199 .fb_imageblit = cfb_imageblit, 199 .fb_imageblit = drm_fb_helper_cfb_imageblit,
200 .fb_mmap = psbfb_mmap, 200 .fb_mmap = psbfb_mmap,
201 .fb_sync = psbfb_sync, 201 .fb_sync = psbfb_sync,
202 .fb_ioctl = psbfb_ioctl, 202 .fb_ioctl = psbfb_ioctl,
@@ -208,9 +208,9 @@ static struct fb_ops psbfb_roll_ops = {
208 .fb_set_par = drm_fb_helper_set_par, 208 .fb_set_par = drm_fb_helper_set_par,
209 .fb_blank = drm_fb_helper_blank, 209 .fb_blank = drm_fb_helper_blank,
210 .fb_setcolreg = psbfb_setcolreg, 210 .fb_setcolreg = psbfb_setcolreg,
211 .fb_fillrect = cfb_fillrect, 211 .fb_fillrect = drm_fb_helper_cfb_fillrect,
212 .fb_copyarea = cfb_copyarea, 212 .fb_copyarea = drm_fb_helper_cfb_copyarea,
213 .fb_imageblit = cfb_imageblit, 213 .fb_imageblit = drm_fb_helper_cfb_imageblit,
214 .fb_pan_display = psbfb_pan, 214 .fb_pan_display = psbfb_pan,
215 .fb_mmap = psbfb_mmap, 215 .fb_mmap = psbfb_mmap,
216 .fb_ioctl = psbfb_ioctl, 216 .fb_ioctl = psbfb_ioctl,
@@ -222,9 +222,9 @@ static struct fb_ops psbfb_unaccel_ops = {
222 .fb_set_par = drm_fb_helper_set_par, 222 .fb_set_par = drm_fb_helper_set_par,
223 .fb_blank = drm_fb_helper_blank, 223 .fb_blank = drm_fb_helper_blank,
224 .fb_setcolreg = psbfb_setcolreg, 224 .fb_setcolreg = psbfb_setcolreg,
225 .fb_fillrect = cfb_fillrect, 225 .fb_fillrect = drm_fb_helper_cfb_fillrect,
226 .fb_copyarea = cfb_copyarea, 226 .fb_copyarea = drm_fb_helper_cfb_copyarea,
227 .fb_imageblit = cfb_imageblit, 227 .fb_imageblit = drm_fb_helper_cfb_imageblit,
228 .fb_mmap = psbfb_mmap, 228 .fb_mmap = psbfb_mmap,
229 .fb_ioctl = psbfb_ioctl, 229 .fb_ioctl = psbfb_ioctl,
230}; 230};
@@ -343,7 +343,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
343 struct drm_framebuffer *fb; 343 struct drm_framebuffer *fb;
344 struct psb_framebuffer *psbfb = &fbdev->pfb; 344 struct psb_framebuffer *psbfb = &fbdev->pfb;
345 struct drm_mode_fb_cmd2 mode_cmd; 345 struct drm_mode_fb_cmd2 mode_cmd;
346 struct device *device = &dev->pdev->dev;
347 int size; 346 int size;
348 int ret; 347 int ret;
349 struct gtt_range *backing; 348 struct gtt_range *backing;
@@ -409,9 +408,9 @@ static int psbfb_create(struct psb_fbdev *fbdev,
409 408
410 mutex_lock(&dev->struct_mutex); 409 mutex_lock(&dev->struct_mutex);
411 410
412 info = framebuffer_alloc(0, device); 411 info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
413 if (!info) { 412 if (IS_ERR(info)) {
414 ret = -ENOMEM; 413 ret = PTR_ERR(info);
415 goto out_err1; 414 goto out_err1;
416 } 415 }
417 info->par = fbdev; 416 info->par = fbdev;
@@ -426,7 +425,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
426 psbfb->fbdev = info; 425 psbfb->fbdev = info;
427 426
428 fbdev->psb_fb_helper.fb = fb; 427 fbdev->psb_fb_helper.fb = fb;
429 fbdev->psb_fb_helper.fbdev = info;
430 428
431 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 429 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
432 strcpy(info->fix.id, "psbdrmfb"); 430 strcpy(info->fix.id, "psbdrmfb");
@@ -440,12 +438,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
440 } else /* Software */ 438 } else /* Software */
441 info->fbops = &psbfb_unaccel_ops; 439 info->fbops = &psbfb_unaccel_ops;
442 440
443 ret = fb_alloc_cmap(&info->cmap, 256, 0);
444 if (ret) {
445 ret = -ENOMEM;
446 goto out_unref;
447 }
448
449 info->fix.smem_start = dev->mode_config.fb_base; 441 info->fix.smem_start = dev->mode_config.fb_base;
450 info->fix.smem_len = size; 442 info->fix.smem_len = size;
451 info->fix.ywrapstep = gtt_roll; 443 info->fix.ywrapstep = gtt_roll;
@@ -456,11 +448,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
456 info->screen_size = size; 448 info->screen_size = size;
457 449
458 if (dev_priv->gtt.stolen_size) { 450 if (dev_priv->gtt.stolen_size) {
459 info->apertures = alloc_apertures(1);
460 if (!info->apertures) {
461 ret = -ENOMEM;
462 goto out_unref;
463 }
464 info->apertures->ranges[0].base = dev->mode_config.fb_base; 451 info->apertures->ranges[0].base = dev->mode_config.fb_base;
465 info->apertures->ranges[0].size = dev_priv->gtt.stolen_size; 452 info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
466 } 453 }
@@ -483,6 +470,8 @@ out_unref:
483 psb_gtt_free_range(dev, backing); 470 psb_gtt_free_range(dev, backing);
484 else 471 else
485 drm_gem_object_unreference(&backing->gem); 472 drm_gem_object_unreference(&backing->gem);
473
474 drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
486out_err1: 475out_err1:
487 mutex_unlock(&dev->struct_mutex); 476 mutex_unlock(&dev->struct_mutex);
488 psb_gtt_free_range(dev, backing); 477 psb_gtt_free_range(dev, backing);
@@ -570,16 +559,11 @@ static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
570 559
571static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev) 560static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
572{ 561{
573 struct fb_info *info;
574 struct psb_framebuffer *psbfb = &fbdev->pfb; 562 struct psb_framebuffer *psbfb = &fbdev->pfb;
575 563
576 if (fbdev->psb_fb_helper.fbdev) { 564 drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper);
577 info = fbdev->psb_fb_helper.fbdev; 565 drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
578 unregister_framebuffer(info); 566
579 if (info->cmap.len)
580 fb_dealloc_cmap(&info->cmap);
581 framebuffer_release(info);
582 }
583 drm_fb_helper_fini(&fbdev->psb_fb_helper); 567 drm_fb_helper_fini(&fbdev->psb_fb_helper);
584 drm_framebuffer_unregister_private(&psbfb->base); 568 drm_framebuffer_unregister_private(&psbfb->base);
585 drm_framebuffer_cleanup(&psbfb->base); 569 drm_framebuffer_cleanup(&psbfb->base);
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index eb87e2538861..051eab33e4c7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -36,21 +36,6 @@ config DRM_I915
36 i810 driver instead, and the Atom z5xx series has an entirely 36 i810 driver instead, and the Atom z5xx series has an entirely
37 different implementation. 37 different implementation.
38 38
39config DRM_I915_FBDEV
40 bool "Enable legacy fbdev support for the modesetting intel driver"
41 depends on DRM_I915
42 select DRM_KMS_FB_HELPER
43 select FB_CFB_FILLRECT
44 select FB_CFB_COPYAREA
45 select FB_CFB_IMAGEBLIT
46 default y
47 help
48 Choose this option if you have a need for the legacy fbdev
49 support. Note that this support also provide the linux console
50 support on top of the intel modesetting driver.
51
52 If in doubt, say "Y".
53
54config DRM_I915_PRELIMINARY_HW_SUPPORT 39config DRM_I915_PRELIMINARY_HW_SUPPORT
55 bool "Enable preliminary support for prerelease Intel hardware by default" 40 bool "Enable preliminary support for prerelease Intel hardware by default"
56 depends on DRM_I915 41 depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index ddb69f337dc6..44d290ae1999 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -66,7 +66,7 @@ i915-y += intel_audio.o \
66 intel_sideband.o \ 66 intel_sideband.o \
67 intel_sprite.o 67 intel_sprite.o
68i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o 68i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
69i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o 69i915-$(CONFIG_DRM_FBDEV_EMULATION) += intel_fbdev.o
70 70
71# modesetting output/encoder code 71# modesetting output/encoder code
72i915-y += dvo_ch7017.o \ 72i915-y += dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2f1b693d7162..4563f8b955ea 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1863,7 +1863,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1863 struct intel_framebuffer *fb; 1863 struct intel_framebuffer *fb;
1864 struct drm_framebuffer *drm_fb; 1864 struct drm_framebuffer *drm_fb;
1865 1865
1866#ifdef CONFIG_DRM_I915_FBDEV 1866#ifdef CONFIG_DRM_FBDEV_EMULATION
1867 struct drm_i915_private *dev_priv = dev->dev_private; 1867 struct drm_i915_private *dev_priv = dev->dev_private;
1868 1868
1869 ifbdev = dev_priv->fbdev; 1869 ifbdev = dev_priv->fbdev;
@@ -2700,6 +2700,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
2700 return "PORT_DDI_D_2_LANES"; 2700 return "PORT_DDI_D_2_LANES";
2701 case POWER_DOMAIN_PORT_DDI_D_4_LANES: 2701 case POWER_DOMAIN_PORT_DDI_D_4_LANES:
2702 return "PORT_DDI_D_4_LANES"; 2702 return "PORT_DDI_D_4_LANES";
2703 case POWER_DOMAIN_PORT_DDI_E_2_LANES:
2704 return "PORT_DDI_E_2_LANES";
2703 case POWER_DOMAIN_PORT_DSI: 2705 case POWER_DOMAIN_PORT_DSI:
2704 return "PORT_DSI"; 2706 return "PORT_DSI";
2705 case POWER_DOMAIN_PORT_CRT: 2707 case POWER_DOMAIN_PORT_CRT:
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4646fe1a0499..4737d15de5f0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -665,15 +665,18 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
665 665
666 pci_disable_device(drm_dev->pdev); 666 pci_disable_device(drm_dev->pdev);
667 /* 667 /*
668 * During hibernation on some GEN4 platforms the BIOS may try to access 668 * During hibernation on some platforms the BIOS may try to access
669 * the device even though it's already in D3 and hang the machine. So 669 * the device even though it's already in D3 and hang the machine. So
670 * leave the device in D0 on those platforms and hope the BIOS will 670 * leave the device in D0 on those platforms and hope the BIOS will
671 * power down the device properly. Platforms where this was seen: 671 * power down the device properly. The issue was seen on multiple old
672 * Lenovo Thinkpad X301, X61s 672 * GENs with different BIOS vendors, so having an explicit blacklist
673 * is impractical; apply the workaround on everything pre GEN6. The
674 * platforms where the issue was seen:
675 * Lenovo Thinkpad X301, X61s, X60, T60, X41
676 * Fujitsu FSC S7110
677 * Acer Aspire 1830T
673 */ 678 */
674 if (!(hibernation && 679 if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
675 drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
676 INTEL_INFO(dev_priv)->gen == 4))
677 pci_set_power_state(drm_dev->pdev, PCI_D3hot); 680 pci_set_power_state(drm_dev->pdev, PCI_D3hot);
678 681
679 return 0; 682 return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1d99402e4a1e..4eabe19a684f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -188,6 +188,7 @@ enum intel_display_power_domain {
188 POWER_DOMAIN_PORT_DDI_C_4_LANES, 188 POWER_DOMAIN_PORT_DDI_C_4_LANES,
189 POWER_DOMAIN_PORT_DDI_D_2_LANES, 189 POWER_DOMAIN_PORT_DDI_D_2_LANES,
190 POWER_DOMAIN_PORT_DDI_D_4_LANES, 190 POWER_DOMAIN_PORT_DDI_D_4_LANES,
191 POWER_DOMAIN_PORT_DDI_E_2_LANES,
191 POWER_DOMAIN_PORT_DSI, 192 POWER_DOMAIN_PORT_DSI,
192 POWER_DOMAIN_PORT_CRT, 193 POWER_DOMAIN_PORT_CRT,
193 POWER_DOMAIN_PORT_OTHER, 194 POWER_DOMAIN_PORT_OTHER,
@@ -220,6 +221,7 @@ enum hpd_pin {
220 HPD_PORT_B, 221 HPD_PORT_B,
221 HPD_PORT_C, 222 HPD_PORT_C,
222 HPD_PORT_D, 223 HPD_PORT_D,
224 HPD_PORT_E,
223 HPD_NUM_PINS 225 HPD_NUM_PINS
224}; 226};
225 227
@@ -1421,6 +1423,10 @@ enum modeset_restore {
1421#define DP_AUX_C 0x20 1423#define DP_AUX_C 0x20
1422#define DP_AUX_D 0x30 1424#define DP_AUX_D 0x30
1423 1425
1426#define DDC_PIN_B 0x05
1427#define DDC_PIN_C 0x04
1428#define DDC_PIN_D 0x06
1429
1424struct ddi_vbt_port_info { 1430struct ddi_vbt_port_info {
1425 /* 1431 /*
1426 * This is an index in the HDMI/DVI DDI buffer translation table. 1432 * This is an index in the HDMI/DVI DDI buffer translation table.
@@ -1435,6 +1441,7 @@ struct ddi_vbt_port_info {
1435 uint8_t supports_dp:1; 1441 uint8_t supports_dp:1;
1436 1442
1437 uint8_t alternate_aux_channel; 1443 uint8_t alternate_aux_channel;
1444 uint8_t alternate_ddc_pin;
1438 1445
1439 uint8_t dp_boost_level; 1446 uint8_t dp_boost_level;
1440 uint8_t hdmi_boost_level; 1447 uint8_t hdmi_boost_level;
@@ -1875,7 +1882,7 @@ struct drm_i915_private {
1875 1882
1876 struct drm_i915_gem_object *vlv_pctx; 1883 struct drm_i915_gem_object *vlv_pctx;
1877 1884
1878#ifdef CONFIG_DRM_I915_FBDEV 1885#ifdef CONFIG_DRM_FBDEV_EMULATION
1879 /* list of fbdev register on this device */ 1886 /* list of fbdev register on this device */
1880 struct intel_fbdev *fbdev; 1887 struct intel_fbdev *fbdev;
1881 struct work_struct fbdev_suspend_work; 1888 struct work_struct fbdev_suspend_work;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index a36cb95ec798..f361c4a56995 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -348,7 +348,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
348 * memory, so just consider the start. */ 348 * memory, so just consider the start. */
349 reserved_total = stolen_top - reserved_base; 349 reserved_total = stolen_top - reserved_base;
350 350
351 DRM_DEBUG_KMS("Memory reserved for graphics device: %luK, usable: %luK\n", 351 DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
352 dev_priv->gtt.stolen_size >> 10, 352 dev_priv->gtt.stolen_size >> 10,
353 (dev_priv->gtt.stolen_size - reserved_total) >> 10); 353 (dev_priv->gtt.stolen_size - reserved_total) >> 10);
354 354
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a05104562932..8485bea966cc 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -61,6 +61,13 @@ static const u32 hpd_cpt[HPD_NUM_PINS] = {
61 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 61 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
62}; 62};
63 63
64static const u32 hpd_spt[HPD_NUM_PINS] = {
65 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
66 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
67 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
68 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
69};
70
64static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 71static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
65 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 72 [HPD_CRT] = CRT_HOTPLUG_INT_EN,
66 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 73 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
@@ -1253,6 +1260,8 @@ static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1253 return val & PORTC_HOTPLUG_LONG_DETECT; 1260 return val & PORTC_HOTPLUG_LONG_DETECT;
1254 case PORT_D: 1261 case PORT_D:
1255 return val & PORTD_HOTPLUG_LONG_DETECT; 1262 return val & PORTD_HOTPLUG_LONG_DETECT;
1263 case PORT_E:
1264 return val & PORTE_HOTPLUG_LONG_DETECT;
1256 default: 1265 default:
1257 return false; 1266 return false;
1258 } 1267 }
@@ -1753,7 +1762,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1753{ 1762{
1754 struct drm_i915_private *dev_priv = dev->dev_private; 1763 struct drm_i915_private *dev_priv = dev->dev_private;
1755 int pipe; 1764 int pipe;
1756 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1765 u32 hotplug_trigger;
1766
1767 if (HAS_PCH_SPT(dev))
1768 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT;
1769 else
1770 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1757 1771
1758 if (hotplug_trigger) { 1772 if (hotplug_trigger) {
1759 u32 dig_hotplug_reg, pin_mask, long_mask; 1773 u32 dig_hotplug_reg, pin_mask, long_mask;
@@ -1761,9 +1775,23 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1761 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1775 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1762 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1776 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1763 1777
1764 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1778 if (HAS_PCH_SPT(dev)) {
1765 dig_hotplug_reg, hpd_cpt, 1779 intel_get_hpd_pins(&pin_mask, &long_mask,
1766 pch_port_hotplug_long_detect); 1780 hotplug_trigger,
1781 dig_hotplug_reg, hpd_spt,
1782 pch_port_hotplug_long_detect);
1783
1784 /* detect PORTE HP event */
1785 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1786 if (pch_port_hotplug_long_detect(PORT_E,
1787 dig_hotplug_reg))
1788 long_mask |= 1 << HPD_PORT_E;
1789 } else
1790 intel_get_hpd_pins(&pin_mask, &long_mask,
1791 hotplug_trigger,
1792 dig_hotplug_reg, hpd_cpt,
1793 pch_port_hotplug_long_detect);
1794
1767 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1795 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1768 } 1796 }
1769 1797
@@ -2985,6 +3013,11 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
2985 for_each_intel_encoder(dev, intel_encoder) 3013 for_each_intel_encoder(dev, intel_encoder)
2986 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED) 3014 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
2987 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 3015 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3016 } else if (HAS_PCH_SPT(dev)) {
3017 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3018 for_each_intel_encoder(dev, intel_encoder)
3019 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3020 enabled_irqs |= hpd_spt[intel_encoder->hpd_pin];
2988 } else { 3021 } else {
2989 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3022 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
2990 for_each_intel_encoder(dev, intel_encoder) 3023 for_each_intel_encoder(dev, intel_encoder)
@@ -3006,6 +3039,13 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
3006 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3039 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3007 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3040 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3008 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3041 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3042
3043 /* enable SPT PORTE hot plug */
3044 if (HAS_PCH_SPT(dev)) {
3045 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3046 hotplug |= PORTE_HOTPLUG_ENABLE;
3047 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3048 }
3009} 3049}
3010 3050
3011static void bxt_hpd_irq_setup(struct drm_device *dev) 3051static void bxt_hpd_irq_setup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c82db2aed55c..e7c9dc8e24fe 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -5981,6 +5981,7 @@ enum skl_disp_power_wells {
5981#define SDE_AUXC_CPT (1 << 26) 5981#define SDE_AUXC_CPT (1 << 26)
5982#define SDE_AUXB_CPT (1 << 25) 5982#define SDE_AUXB_CPT (1 << 25)
5983#define SDE_AUX_MASK_CPT (7 << 25) 5983#define SDE_AUX_MASK_CPT (7 << 25)
5984#define SDE_PORTE_HOTPLUG_SPT (1 << 25)
5984#define SDE_PORTD_HOTPLUG_CPT (1 << 23) 5985#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
5985#define SDE_PORTC_HOTPLUG_CPT (1 << 22) 5986#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
5986#define SDE_PORTB_HOTPLUG_CPT (1 << 21) 5987#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
@@ -5991,6 +5992,10 @@ enum skl_disp_power_wells {
5991 SDE_PORTD_HOTPLUG_CPT | \ 5992 SDE_PORTD_HOTPLUG_CPT | \
5992 SDE_PORTC_HOTPLUG_CPT | \ 5993 SDE_PORTC_HOTPLUG_CPT | \
5993 SDE_PORTB_HOTPLUG_CPT) 5994 SDE_PORTB_HOTPLUG_CPT)
5995#define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \
5996 SDE_PORTD_HOTPLUG_CPT | \
5997 SDE_PORTC_HOTPLUG_CPT | \
5998 SDE_PORTB_HOTPLUG_CPT)
5994#define SDE_GMBUS_CPT (1 << 17) 5999#define SDE_GMBUS_CPT (1 << 17)
5995#define SDE_ERROR_CPT (1 << 16) 6000#define SDE_ERROR_CPT (1 << 16)
5996#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10) 6001#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
@@ -6062,6 +6067,13 @@ enum skl_disp_power_wells {
6062#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) 6067#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
6063#define PORTB_HOTPLUG_LONG_DETECT (2 << 0) 6068#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
6064 6069
6070#define PCH_PORT_HOTPLUG2 0xc403C /* SHOTPLUG_CTL2 */
6071#define PORTE_HOTPLUG_ENABLE (1 << 4)
6072#define PORTE_HOTPLUG_STATUS_MASK (0x3 << 0)
6073#define PORTE_HOTPLUG_NO_DETECT (0 << 0)
6074#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
6075#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
6076
6065#define PCH_GPIOA 0xc5010 6077#define PCH_GPIOA 0xc5010
6066#define PCH_GPIOB 0xc5014 6078#define PCH_GPIOB 0xc5014
6067#define PCH_GPIOC 0xc5018 6079#define PCH_GPIOC 0xc5018
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 8e46149bafdd..b3e437b3bb54 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -401,7 +401,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
401{ 401{
402 struct sdvo_device_mapping *p_mapping; 402 struct sdvo_device_mapping *p_mapping;
403 const struct bdb_general_definitions *p_defs; 403 const struct bdb_general_definitions *p_defs;
404 const union child_device_config *p_child; 404 const struct old_child_dev_config *child; /* legacy */
405 int i, child_device_num, count; 405 int i, child_device_num, count;
406 u16 block_size; 406 u16 block_size;
407 407
@@ -410,14 +410,14 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
410 DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n"); 410 DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
411 return; 411 return;
412 } 412 }
413 /* judge whether the size of child device meets the requirements. 413
414 * If the child device size obtained from general definition block 414 /*
415 * is different with sizeof(struct child_device_config), skip the 415 * Only parse SDVO mappings when the general definitions block child
416 * parsing of sdvo device info 416 * device size matches that of the *legacy* child device config
417 * struct. Thus, SDVO mapping will be skipped for newer VBT.
417 */ 418 */
418 if (p_defs->child_dev_size != sizeof(*p_child)) { 419 if (p_defs->child_dev_size != sizeof(*child)) {
419 /* different child dev size . Ignore it */ 420 DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n");
420 DRM_DEBUG_KMS("different child size is found. Invalid.\n");
421 return; 421 return;
422 } 422 }
423 /* get the block size of general definitions */ 423 /* get the block size of general definitions */
@@ -427,37 +427,37 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
427 p_defs->child_dev_size; 427 p_defs->child_dev_size;
428 count = 0; 428 count = 0;
429 for (i = 0; i < child_device_num; i++) { 429 for (i = 0; i < child_device_num; i++) {
430 p_child = child_device_ptr(p_defs, i); 430 child = &child_device_ptr(p_defs, i)->old;
431 if (!p_child->old.device_type) { 431 if (!child->device_type) {
432 /* skip the device block if device type is invalid */ 432 /* skip the device block if device type is invalid */
433 continue; 433 continue;
434 } 434 }
435 if (p_child->old.slave_addr != SLAVE_ADDR1 && 435 if (child->slave_addr != SLAVE_ADDR1 &&
436 p_child->old.slave_addr != SLAVE_ADDR2) { 436 child->slave_addr != SLAVE_ADDR2) {
437 /* 437 /*
438 * If the slave address is neither 0x70 nor 0x72, 438 * If the slave address is neither 0x70 nor 0x72,
439 * it is not a SDVO device. Skip it. 439 * it is not a SDVO device. Skip it.
440 */ 440 */
441 continue; 441 continue;
442 } 442 }
443 if (p_child->old.dvo_port != DEVICE_PORT_DVOB && 443 if (child->dvo_port != DEVICE_PORT_DVOB &&
444 p_child->old.dvo_port != DEVICE_PORT_DVOC) { 444 child->dvo_port != DEVICE_PORT_DVOC) {
445 /* skip the incorrect SDVO port */ 445 /* skip the incorrect SDVO port */
446 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n"); 446 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
447 continue; 447 continue;
448 } 448 }
449 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" 449 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
450 " %s port\n", 450 " %s port\n",
451 p_child->old.slave_addr, 451 child->slave_addr,
452 (p_child->old.dvo_port == DEVICE_PORT_DVOB) ? 452 (child->dvo_port == DEVICE_PORT_DVOB) ?
453 "SDVOB" : "SDVOC"); 453 "SDVOB" : "SDVOC");
454 p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]); 454 p_mapping = &(dev_priv->sdvo_mappings[child->dvo_port - 1]);
455 if (!p_mapping->initialized) { 455 if (!p_mapping->initialized) {
456 p_mapping->dvo_port = p_child->old.dvo_port; 456 p_mapping->dvo_port = child->dvo_port;
457 p_mapping->slave_addr = p_child->old.slave_addr; 457 p_mapping->slave_addr = child->slave_addr;
458 p_mapping->dvo_wiring = p_child->old.dvo_wiring; 458 p_mapping->dvo_wiring = child->dvo_wiring;
459 p_mapping->ddc_pin = p_child->old.ddc_pin; 459 p_mapping->ddc_pin = child->ddc_pin;
460 p_mapping->i2c_pin = p_child->old.i2c_pin; 460 p_mapping->i2c_pin = child->i2c_pin;
461 p_mapping->initialized = 1; 461 p_mapping->initialized = 1;
462 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", 462 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
463 p_mapping->dvo_port, 463 p_mapping->dvo_port,
@@ -469,7 +469,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
469 DRM_DEBUG_KMS("Maybe one SDVO port is shared by " 469 DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
470 "two SDVO device.\n"); 470 "two SDVO device.\n");
471 } 471 }
472 if (p_child->old.slave2_addr) { 472 if (child->slave2_addr) {
473 /* Maybe this is a SDVO device with multiple inputs */ 473 /* Maybe this is a SDVO device with multiple inputs */
474 /* And the mapping info is not added */ 474 /* And the mapping info is not added */
475 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this" 475 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -905,23 +905,23 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
905 uint8_t hdmi_level_shift; 905 uint8_t hdmi_level_shift;
906 int i, j; 906 int i, j;
907 bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; 907 bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
908 uint8_t aux_channel; 908 uint8_t aux_channel, ddc_pin;
909 /* Each DDI port can have more than one value on the "DVO Port" field, 909 /* Each DDI port can have more than one value on the "DVO Port" field,
910 * so look for all the possible values for each port and abort if more 910 * so look for all the possible values for each port and abort if more
911 * than one is found. */ 911 * than one is found. */
912 int dvo_ports[][2] = { 912 int dvo_ports[][3] = {
913 {DVO_PORT_HDMIA, DVO_PORT_DPA}, 913 {DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
914 {DVO_PORT_HDMIB, DVO_PORT_DPB}, 914 {DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
915 {DVO_PORT_HDMIC, DVO_PORT_DPC}, 915 {DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
916 {DVO_PORT_HDMID, DVO_PORT_DPD}, 916 {DVO_PORT_HDMID, DVO_PORT_DPD, -1},
917 {DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ }, 917 {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
918 }; 918 };
919 919
920 /* Find the child device to use, abort if more than one found. */ 920 /* Find the child device to use, abort if more than one found. */
921 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 921 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
922 it = dev_priv->vbt.child_dev + i; 922 it = dev_priv->vbt.child_dev + i;
923 923
924 for (j = 0; j < 2; j++) { 924 for (j = 0; j < 3; j++) {
925 if (dvo_ports[port][j] == -1) 925 if (dvo_ports[port][j] == -1)
926 break; 926 break;
927 927
@@ -939,6 +939,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
939 return; 939 return;
940 940
941 aux_channel = child->raw[25]; 941 aux_channel = child->raw[25];
942 ddc_pin = child->common.ddc_pin;
942 943
943 is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; 944 is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
944 is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; 945 is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@@ -970,11 +971,27 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
970 DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); 971 DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
971 972
972 if (is_dvi) { 973 if (is_dvi) {
973 if (child->common.ddc_pin == 0x05 && port != PORT_B) 974 if (port == PORT_E) {
975 info->alternate_ddc_pin = ddc_pin;
976 /* If DDI E shares a DDC pin with another port, then
977 * DVI/HDMI can't exist on the shared port, since
978 * both would use the same DDC pin and the system
979 * couldn't communicate with them separately. */
980 if (ddc_pin == DDC_PIN_B) {
981 dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
982 dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
983 } else if (ddc_pin == DDC_PIN_C) {
984 dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
985 dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
986 } else if (ddc_pin == DDC_PIN_D) {
987 dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
988 dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
989 }
990 } else if (ddc_pin == DDC_PIN_B && port != PORT_B)
974 DRM_DEBUG_KMS("Unexpected DDC pin for port B\n"); 991 DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
975 if (child->common.ddc_pin == 0x04 && port != PORT_C) 992 else if (ddc_pin == DDC_PIN_C && port != PORT_C)
976 DRM_DEBUG_KMS("Unexpected DDC pin for port C\n"); 993 DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
977 if (child->common.ddc_pin == 0x06 && port != PORT_D) 994 else if (ddc_pin == DDC_PIN_D && port != PORT_D)
978 DRM_DEBUG_KMS("Unexpected DDC pin for port D\n"); 995 DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
979 } 996 }
980 997
@@ -1060,27 +1077,30 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
1060 return; 1077 return;
1061 } 1078 }
1062 if (bdb->version < 195) { 1079 if (bdb->version < 195) {
1063 expected_size = 33; 1080 expected_size = sizeof(struct old_child_dev_config);
1064 } else if (bdb->version == 195) { 1081 } else if (bdb->version == 195) {
1065 expected_size = 37; 1082 expected_size = 37;
1066 } else if (bdb->version <= 197) { 1083 } else if (bdb->version <= 197) {
1067 expected_size = 38; 1084 expected_size = 38;
1068 } else { 1085 } else {
1069 expected_size = 38; 1086 expected_size = 38;
1070 DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n", 1087 BUILD_BUG_ON(sizeof(*p_child) < 38);
1071 expected_size, bdb->version); 1088 DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
1089 bdb->version, expected_size);
1072 } 1090 }
1073 1091
1074 if (expected_size > sizeof(*p_child)) { 1092 /* The legacy sized child device config is the minimum we need. */
1075 DRM_ERROR("child_device_config cannot fit in p_child\n"); 1093 if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
1094 DRM_ERROR("Child device config size %u is too small.\n",
1095 p_defs->child_dev_size);
1076 return; 1096 return;
1077 } 1097 }
1078 1098
1079 if (p_defs->child_dev_size != expected_size) { 1099 /* Flag an error for unexpected size, but continue anyway. */
1080 DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n", 1100 if (p_defs->child_dev_size != expected_size)
1101 DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
1081 p_defs->child_dev_size, expected_size, bdb->version); 1102 p_defs->child_dev_size, expected_size, bdb->version);
1082 return; 1103
1083 }
1084 /* get the block size of general definitions */ 1104 /* get the block size of general definitions */
1085 block_size = get_blocksize(p_defs); 1105 block_size = get_blocksize(p_defs);
1086 /* get the number of child device */ 1106 /* get the number of child device */
@@ -1125,7 +1145,14 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
1125 1145
1126 child_dev_ptr = dev_priv->vbt.child_dev + count; 1146 child_dev_ptr = dev_priv->vbt.child_dev + count;
1127 count++; 1147 count++;
1128 memcpy(child_dev_ptr, p_child, p_defs->child_dev_size); 1148
1149 /*
1150 * Copy as much of the child device config as we know about
1151 * (sizeof) and as is available (child_dev_size). Accessing
1152 * the data must depend on the VBT version.
1153 */
1154 memcpy(child_dev_ptr, p_child,
1155 min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
1129 } 1156 }
1130 return; 1157 return;
1131} 1158}
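The min_t() bound above is the general pattern for versioned, variable-size records: copy no more than the source provides and no more than the destination holds, then gate field access on the version. A self-contained sketch of the pattern (plain C, outside the kernel):

	#include <stddef.h>
	#include <string.h>

	/* Sketch: dst_size is sizeof(*child_dev_ptr); src_size is the
	 * per-record size the VBT reported (p_defs->child_dev_size). */
	static void copy_versioned_record(void *dst, size_t dst_size,
					  const void *src, size_t src_size)
	{
		memset(dst, 0, dst_size);  /* absent newer fields read as 0 */
		memcpy(dst, src, src_size < dst_size ? src_size : dst_size);
	}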
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 6d909efbf43f..46cd5c7ebacd 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -203,9 +203,11 @@ struct bdb_general_features {
203#define DEVICE_PORT_DVOB 0x01 203#define DEVICE_PORT_DVOB 0x01
204#define DEVICE_PORT_DVOC 0x02 204#define DEVICE_PORT_DVOC 0x02
205 205
206/* We used to keep this struct but without any version control. We should avoid 206/*
207 * We used to keep this struct but without any version control. We should avoid
207 * using it in the future, but it should be safe to keep using it in the old 208 * using it in the future, but it should be safe to keep using it in the old
208 * code. */ 209 * code. Do not change; we rely on its size.
210 */
209struct old_child_dev_config { 211struct old_child_dev_config {
210 u16 handle; 212 u16 handle;
211 u16 device_type; 213 u16 device_type;
@@ -756,11 +758,6 @@ int intel_parse_bios(struct drm_device *dev);
756#define DVO_C 2 758#define DVO_C 2
757#define DVO_D 3 759#define DVO_D 3
758 760
759/* define the PORT for DP output type */
760#define PORT_IDPB 7
761#define PORT_IDPC 8
762#define PORT_IDPD 9
763
764/* Possible values for the "DVO Port" field for versions >= 155: */ 761/* Possible values for the "DVO Port" field for versions >= 155: */
765#define DVO_PORT_HDMIA 0 762#define DVO_PORT_HDMIA 0
766#define DVO_PORT_HDMIB 1 763#define DVO_PORT_HDMIB 1
@@ -773,6 +770,8 @@ int intel_parse_bios(struct drm_device *dev);
773#define DVO_PORT_DPC 8 770#define DVO_PORT_DPC 8
774#define DVO_PORT_DPD 9 771#define DVO_PORT_DPD 9
775#define DVO_PORT_DPA 10 772#define DVO_PORT_DPA 10
773#define DVO_PORT_DPE 11
774#define DVO_PORT_HDMIE 12
776#define DVO_PORT_MIPIA 21 775#define DVO_PORT_MIPIA 21
777#define DVO_PORT_MIPIB 22 776#define DVO_PORT_MIPIB 22
778#define DVO_PORT_MIPIC 23 777#define DVO_PORT_MIPIC 23
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 5dff8b7e0f03..4823184258a0 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -128,7 +128,7 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
128 { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */ 128 { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */
129}; 129};
130 130
131/* Skylake H, S, and Skylake Y with 0.95V VccIO */ 131/* Skylake H and S */
132static const struct ddi_buf_trans skl_ddi_translations_dp[] = { 132static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
133 { 0x00002016, 0x000000A0, 0x0 }, 133 { 0x00002016, 0x000000A0, 0x0 },
134 { 0x00005012, 0x0000009B, 0x0 }, 134 { 0x00005012, 0x0000009B, 0x0 },
@@ -143,23 +143,23 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
143 143
144/* Skylake U */ 144/* Skylake U */
145static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { 145static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
146 { 0x00002016, 0x000000A2, 0x0 }, 146 { 0x0000201B, 0x000000A2, 0x0 },
147 { 0x00005012, 0x00000088, 0x0 }, 147 { 0x00005012, 0x00000088, 0x0 },
148 { 0x00007011, 0x00000087, 0x0 }, 148 { 0x00007011, 0x00000087, 0x0 },
149 { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost */ 149 { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost level 0x1 */
150 { 0x00002016, 0x0000009D, 0x0 }, 150 { 0x0000201B, 0x0000009D, 0x0 },
151 { 0x00005012, 0x000000C7, 0x0 }, 151 { 0x00005012, 0x000000C7, 0x0 },
152 { 0x00007011, 0x000000C7, 0x0 }, 152 { 0x00007011, 0x000000C7, 0x0 },
153 { 0x00002016, 0x00000088, 0x0 }, 153 { 0x00002016, 0x00000088, 0x0 },
154 { 0x00005012, 0x000000C7, 0x0 }, 154 { 0x00005012, 0x000000C7, 0x0 },
155}; 155};
156 156
157/* Skylake Y with 0.85V VccIO */ 157/* Skylake Y */
158static const struct ddi_buf_trans skl_y_085v_ddi_translations_dp[] = { 158static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
159 { 0x00000018, 0x000000A2, 0x0 }, 159 { 0x00000018, 0x000000A2, 0x0 },
160 { 0x00005012, 0x00000088, 0x0 }, 160 { 0x00005012, 0x00000088, 0x0 },
161 { 0x00007011, 0x00000087, 0x0 }, 161 { 0x00007011, 0x00000087, 0x0 },
162 { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost */ 162 { 0x80009010, 0x000000C7, 0x3 }, /* Uses I_boost level 0x3 */
163 { 0x00000018, 0x0000009D, 0x0 }, 163 { 0x00000018, 0x0000009D, 0x0 },
164 { 0x00005012, 0x000000C7, 0x0 }, 164 { 0x00005012, 0x000000C7, 0x0 },
165 { 0x00007011, 0x000000C7, 0x0 }, 165 { 0x00007011, 0x000000C7, 0x0 },
@@ -168,7 +168,7 @@ static const struct ddi_buf_trans skl_y_085v_ddi_translations_dp[] = {
168}; 168};
169 169
170/* 170/*
171 * Skylake H and S, and Skylake Y with 0.95V VccIO 171 * Skylake H and S
172 * eDP 1.4 low vswing translation parameters 172 * eDP 1.4 low vswing translation parameters
173 */ 173 */
174static const struct ddi_buf_trans skl_ddi_translations_edp[] = { 174static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
@@ -202,10 +202,10 @@ static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
202}; 202};
203 203
204/* 204/*
205 * Skylake Y with 0.95V VccIO 205 * Skylake Y
206 * eDP 1.4 low vswing translation parameters 206 * eDP 1.4 low vswing translation parameters
207 */ 207 */
208static const struct ddi_buf_trans skl_y_085v_ddi_translations_edp[] = { 208static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
209 { 0x00000018, 0x000000A8, 0x0 }, 209 { 0x00000018, 0x000000A8, 0x0 },
210 { 0x00004013, 0x000000AB, 0x0 }, 210 { 0x00004013, 0x000000AB, 0x0 },
211 { 0x00007011, 0x000000A4, 0x0 }, 211 { 0x00007011, 0x000000A4, 0x0 },
@@ -218,7 +218,7 @@ static const struct ddi_buf_trans skl_y_085v_ddi_translations_edp[] = {
218 { 0x00000018, 0x0000008A, 0x0 }, 218 { 0x00000018, 0x0000008A, 0x0 },
219}; 219};
220 220
221/* Skylake H, S and U, and Skylake Y with 0.95V VccIO */ 221/* Skylake U, H and S */
222static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { 222static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
223 { 0x00000018, 0x000000AC, 0x0 }, 223 { 0x00000018, 0x000000AC, 0x0 },
224 { 0x00005012, 0x0000009D, 0x0 }, 224 { 0x00005012, 0x0000009D, 0x0 },
@@ -233,8 +233,8 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
233 { 0x00000018, 0x000000C7, 0x0 }, 233 { 0x00000018, 0x000000C7, 0x0 },
234}; 234};
235 235
236/* Skylake Y with 0.85V VccIO */ 236/* Skylake Y */
237static const struct ddi_buf_trans skl_y_085v_ddi_translations_hdmi[] = { 237static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
238 { 0x00000018, 0x000000A1, 0x0 }, 238 { 0x00000018, 0x000000A1, 0x0 },
239 { 0x00005012, 0x000000DF, 0x0 }, 239 { 0x00005012, 0x000000DF, 0x0 },
240 { 0x00007011, 0x00000084, 0x0 }, 240 { 0x00007011, 0x00000084, 0x0 },
@@ -244,7 +244,7 @@ static const struct ddi_buf_trans skl_y_085v_ddi_translations_hdmi[] = {
244 { 0x00006013, 0x000000C7, 0x0 }, 244 { 0x00006013, 0x000000C7, 0x0 },
245 { 0x00000018, 0x0000008A, 0x0 }, 245 { 0x00000018, 0x0000008A, 0x0 },
246 { 0x00003015, 0x000000C7, 0x0 }, /* Default */ 246 { 0x00003015, 0x000000C7, 0x0 }, /* Default */
247 { 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost */ 247 { 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost level 0x7 */
248 { 0x00000018, 0x000000C7, 0x0 }, 248 { 0x00000018, 0x000000C7, 0x0 },
249}; 249};
250 250
@@ -335,19 +335,11 @@ intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
335static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev, 335static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
336 int *n_entries) 336 int *n_entries)
337{ 337{
338 struct drm_i915_private *dev_priv = dev->dev_private;
339 const struct ddi_buf_trans *ddi_translations; 338 const struct ddi_buf_trans *ddi_translations;
340 static int is_095v = -1;
341
342 if (is_095v == -1) {
343 u32 spr1 = I915_READ(UAIMI_SPR1);
344
345 is_095v = spr1 & SKL_VCCIO_MASK;
346 }
347 339
348 if (IS_SKL_ULX(dev) && !is_095v) { 340 if (IS_SKL_ULX(dev)) {
349 ddi_translations = skl_y_085v_ddi_translations_dp; 341 ddi_translations = skl_y_ddi_translations_dp;
350 *n_entries = ARRAY_SIZE(skl_y_085v_ddi_translations_dp); 342 *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
351 } else if (IS_SKL_ULT(dev)) { 343 } else if (IS_SKL_ULT(dev)) {
352 ddi_translations = skl_u_ddi_translations_dp; 344 ddi_translations = skl_u_ddi_translations_dp;
353 *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp); 345 *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
@@ -364,23 +356,14 @@ static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
364{ 356{
365 struct drm_i915_private *dev_priv = dev->dev_private; 357 struct drm_i915_private *dev_priv = dev->dev_private;
366 const struct ddi_buf_trans *ddi_translations; 358 const struct ddi_buf_trans *ddi_translations;
367 static int is_095v = -1;
368
369 if (is_095v == -1) {
370 u32 spr1 = I915_READ(UAIMI_SPR1);
371 359
372 is_095v = spr1 & SKL_VCCIO_MASK; 360 if (IS_SKL_ULX(dev)) {
373 }
374
375 if (IS_SKL_ULX(dev) && !is_095v) {
376 if (dev_priv->edp_low_vswing) { 361 if (dev_priv->edp_low_vswing) {
377 ddi_translations = skl_y_085v_ddi_translations_edp; 362 ddi_translations = skl_y_ddi_translations_edp;
378 *n_entries = 363 *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
379 ARRAY_SIZE(skl_y_085v_ddi_translations_edp);
380 } else { 364 } else {
381 ddi_translations = skl_y_085v_ddi_translations_dp; 365 ddi_translations = skl_y_ddi_translations_dp;
382 *n_entries = 366 *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
383 ARRAY_SIZE(skl_y_085v_ddi_translations_dp);
384 } 367 }
385 } else if (IS_SKL_ULT(dev)) { 368 } else if (IS_SKL_ULT(dev)) {
386 if (dev_priv->edp_low_vswing) { 369 if (dev_priv->edp_low_vswing) {
@@ -407,19 +390,11 @@ static const struct ddi_buf_trans *
407skl_get_buf_trans_hdmi(struct drm_device *dev, 390skl_get_buf_trans_hdmi(struct drm_device *dev,
408 int *n_entries) 391 int *n_entries)
409{ 392{
410 struct drm_i915_private *dev_priv = dev->dev_private;
411 const struct ddi_buf_trans *ddi_translations; 393 const struct ddi_buf_trans *ddi_translations;
412 static int is_095v = -1;
413
414 if (is_095v == -1) {
415 u32 spr1 = I915_READ(UAIMI_SPR1);
416
417 is_095v = spr1 & SKL_VCCIO_MASK;
418 }
419 394
420 if (IS_SKL_ULX(dev) && !is_095v) { 395 if (IS_SKL_ULX(dev)) {
421 ddi_translations = skl_y_085v_ddi_translations_hdmi; 396 ddi_translations = skl_y_ddi_translations_hdmi;
422 *n_entries = ARRAY_SIZE(skl_y_085v_ddi_translations_hdmi); 397 *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
423 } else { 398 } else {
424 ddi_translations = skl_ddi_translations_hdmi; 399 ddi_translations = skl_ddi_translations_hdmi;
425 *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); 400 *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
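With the VccIO read gone, table choice above depends only on the SKU, so the three lookup functions share one shape. A hypothetical consolidation (the struct and function names are illustrative):

	struct buf_trans_table {
		const struct ddi_buf_trans *entries;
		int n_entries;
	};

	#define BUF_TRANS(t) ((struct buf_trans_table){ t, ARRAY_SIZE(t) })

	/* Sketch: one selector per output type instead of open-coded
	 * if/else chains in each skl_get_buf_trans_*() function. */
	static struct buf_trans_table skl_dp_buf_trans(struct drm_device *dev)
	{
		if (IS_SKL_ULX(dev))
			return BUF_TRANS(skl_y_ddi_translations_dp);
		if (IS_SKL_ULT(dev))
			return BUF_TRANS(skl_u_ddi_translations_dp);
		return BUF_TRANS(skl_ddi_translations_dp);
	}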
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d38ceb039de6..deba3330de71 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5102,7 +5102,6 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
5102{ 5102{
5103 switch (port) { 5103 switch (port) {
5104 case PORT_A: 5104 case PORT_A:
5105 case PORT_E:
5106 return POWER_DOMAIN_PORT_DDI_A_4_LANES; 5105 return POWER_DOMAIN_PORT_DDI_A_4_LANES;
5107 case PORT_B: 5106 case PORT_B:
5108 return POWER_DOMAIN_PORT_DDI_B_4_LANES; 5107 return POWER_DOMAIN_PORT_DDI_B_4_LANES;
@@ -5110,6 +5109,8 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
5110 return POWER_DOMAIN_PORT_DDI_C_4_LANES; 5109 return POWER_DOMAIN_PORT_DDI_C_4_LANES;
5111 case PORT_D: 5110 case PORT_D:
5112 return POWER_DOMAIN_PORT_DDI_D_4_LANES; 5111 return POWER_DOMAIN_PORT_DDI_D_4_LANES;
5112 case PORT_E:
5113 return POWER_DOMAIN_PORT_DDI_E_2_LANES;
5113 default: 5114 default:
5114 WARN_ON_ONCE(1); 5115 WARN_ON_ONCE(1);
5115 return POWER_DOMAIN_PORT_OTHER; 5116 return POWER_DOMAIN_PORT_OTHER;
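The PORT_E case above pairs with the intel_runtime_pm.c hunks further down, which add BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) to the relevant power wells. A caller driving DDI E then takes the domain through the usual helpers (the call site is illustrative):

	/* Sketch: hold the DDI E power domain across port programming. */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PORT_DDI_E_2_LANES);
	/* ... program DDI E registers ... */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_E_2_LANES);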
@@ -5684,16 +5685,13 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
5684 /* enable PG1 and Misc I/O */ 5685 /* enable PG1 and Misc I/O */
5685 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 5686 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
5686 5687
5687 /* DPLL0 already enabed !? */ 5688 /* DPLL0 not enabled (happens on early BIOS versions) */
5688 if (I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE) { 5689 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5689 DRM_DEBUG_DRIVER("DPLL0 already running\n"); 5690 /* enable DPLL0 */
5690 return; 5691 required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
5692 skl_dpll0_enable(dev_priv, required_vco);
5691 } 5693 }
5692 5694
5693 /* enable DPLL0 */
5694 required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
5695 skl_dpll0_enable(dev_priv, required_vco);
5696
5697 /* set CDCLK to the frequency the BIOS chose */ 5695 /* set CDCLK to the frequency the BIOS chose */
5698 skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk); 5696 skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
5699 5697
@@ -10114,7 +10112,7 @@ static struct drm_framebuffer *
10114mode_fits_in_fbdev(struct drm_device *dev, 10112mode_fits_in_fbdev(struct drm_device *dev,
10115 struct drm_display_mode *mode) 10113 struct drm_display_mode *mode)
10116{ 10114{
10117#ifdef CONFIG_DRM_I915_FBDEV 10115#ifdef CONFIG_DRM_FBDEV_EMULATION
10118 struct drm_i915_private *dev_priv = dev->dev_private; 10116 struct drm_i915_private *dev_priv = dev->dev_private;
10119 struct drm_i915_gem_object *obj; 10117 struct drm_i915_gem_object *obj;
10120 struct drm_framebuffer *fb; 10118 struct drm_framebuffer *fb;
@@ -13509,7 +13507,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
13509 struct intel_plane *primary; 13507 struct intel_plane *primary;
13510 struct intel_plane_state *state; 13508 struct intel_plane_state *state;
13511 const uint32_t *intel_primary_formats; 13509 const uint32_t *intel_primary_formats;
13512 int num_formats; 13510 unsigned int num_formats;
13513 13511
13514 primary = kzalloc(sizeof(*primary), GFP_KERNEL); 13512 primary = kzalloc(sizeof(*primary), GFP_KERNEL);
13515 if (primary == NULL) 13513 if (primary == NULL)
@@ -13947,6 +13945,15 @@ static void intel_setup_outputs(struct drm_device *dev)
13947 intel_ddi_init(dev, PORT_C); 13945 intel_ddi_init(dev, PORT_C);
13948 if (found & SFUSE_STRAP_DDID_DETECTED) 13946 if (found & SFUSE_STRAP_DDID_DETECTED)
13949 intel_ddi_init(dev, PORT_D); 13947 intel_ddi_init(dev, PORT_D);
13948 /*
13949 * On SKL we don't have a way to detect DDI-E, so we rely on VBT.
13950 */
13951 if (IS_SKYLAKE(dev) &&
13952 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
13953 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
13954 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
13955 intel_ddi_init(dev, PORT_E);
13956
13950 } else if (HAS_PCH_SPLIT(dev)) { 13957 } else if (HAS_PCH_SPLIT(dev)) {
13951 int found; 13958 int found;
13952 dpd_is_edp = intel_dp_is_edp(dev, PORT_D); 13959 dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
@@ -14302,7 +14309,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
14302 return intel_framebuffer_create(dev, mode_cmd, obj); 14309 return intel_framebuffer_create(dev, mode_cmd, obj);
14303} 14310}
14304 14311
14305#ifndef CONFIG_DRM_I915_FBDEV 14312#ifndef CONFIG_DRM_FBDEV_EMULATION
14306static inline void intel_fbdev_output_poll_changed(struct drm_device *dev) 14313static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
14307{ 14314{
14308} 14315}
@@ -14717,6 +14724,24 @@ void intel_modeset_init(struct drm_device *dev)
14717 if (INTEL_INFO(dev)->num_pipes == 0) 14724 if (INTEL_INFO(dev)->num_pipes == 0)
14718 return; 14725 return;
14719 14726
14727 /*
14728 * There may be no VBT; and if the BIOS enabled SSC we can
14729 * just keep using it to avoid unnecessary flicker. Whereas if the
14730 * BIOS isn't using it, don't assume it will work even if the VBT
14731 * indicates as much.
14732 */
14733 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
14734 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
14735 DREF_SSC1_ENABLE);
14736
14737 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
14738 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
14739 bios_lvds_use_ssc ? "en" : "dis",
14740 dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
14741 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
14742 }
14743 }
14744
14720 intel_init_display(dev); 14745 intel_init_display(dev);
14721 intel_init_audio(dev); 14746 intel_init_audio(dev);
14722 14747
@@ -15278,7 +15303,6 @@ err:
15278 15303
15279void intel_modeset_gem_init(struct drm_device *dev) 15304void intel_modeset_gem_init(struct drm_device *dev)
15280{ 15305{
15281 struct drm_i915_private *dev_priv = dev->dev_private;
15282 struct drm_crtc *c; 15306 struct drm_crtc *c;
15283 struct drm_i915_gem_object *obj; 15307 struct drm_i915_gem_object *obj;
15284 int ret; 15308 int ret;
@@ -15287,16 +15311,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
15287 intel_init_gt_powersave(dev); 15311 intel_init_gt_powersave(dev);
15288 mutex_unlock(&dev->struct_mutex); 15312 mutex_unlock(&dev->struct_mutex);
15289 15313
15290 /*
15291 * There may be no VBT; and if the BIOS enabled SSC we can
15292 * just keep using it to avoid unnecessary flicker. Whereas if the
15293 * BIOS isn't using it, don't assume it will work even if the VBT
15294 * indicates as much.
15295 */
15296 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
15297 dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15298 DREF_SSC1_ENABLE);
15299
15300 intel_modeset_init_hw(dev); 15314 intel_modeset_init_hw(dev);
15301 15315
15302 intel_setup_overlay(dev); 15316 intel_setup_overlay(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f45872cc6d24..f8f4d99440c1 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -95,9 +95,6 @@ static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 }; 95 324000, 432000, 540000 };
96static const int skl_rates[] = { 162000, 216000, 270000, 96static const int skl_rates[] = { 162000, 216000, 270000,
97 324000, 432000, 540000 }; 97 324000, 432000, 540000 };
98static const int chv_rates[] = { 162000, 202500, 210000, 216000,
99 243000, 270000, 324000, 405000,
100 420000, 432000, 540000 };
101static const int default_rates[] = { 162000, 270000, 540000 }; 98static const int default_rates[] = { 162000, 270000, 540000 };
102 99
103/** 100/**
@@ -1159,7 +1156,7 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1159 pipe_config->dpll_hw_state.ctrl1 = ctrl1; 1156 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1160} 1157}
1161 1158
1162static void 1159void
1163hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config) 1160hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1164{ 1161{
1165 memset(&pipe_config->dpll_hw_state, 0, 1162 memset(&pipe_config->dpll_hw_state, 0,
@@ -1191,30 +1188,40 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1191 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1; 1188 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1192} 1189}
1193 1190
1191static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1192{
1193 /* WaDisableHBR2:skl */
1194 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1195 return false;
1196
1197 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1198 (INTEL_INFO(dev)->gen >= 9))
1199 return true;
1200 else
1201 return false;
1202}
1203
1194static int 1204static int
1195intel_dp_source_rates(struct drm_device *dev, const int **source_rates) 1205intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1196{ 1206{
1207 int size;
1208
1197 if (IS_BROXTON(dev)) { 1209 if (IS_BROXTON(dev)) {
1198 *source_rates = bxt_rates; 1210 *source_rates = bxt_rates;
1199 return ARRAY_SIZE(bxt_rates); 1211 size = ARRAY_SIZE(bxt_rates);
1200 } else if (IS_SKYLAKE(dev)) { 1212 } else if (IS_SKYLAKE(dev)) {
1201 *source_rates = skl_rates; 1213 *source_rates = skl_rates;
1202 return ARRAY_SIZE(skl_rates); 1214 size = ARRAY_SIZE(skl_rates);
1203 } else if (IS_CHERRYVIEW(dev)) { 1215 } else {
1204 *source_rates = chv_rates; 1216 *source_rates = default_rates;
1205 return ARRAY_SIZE(chv_rates); 1217 size = ARRAY_SIZE(default_rates);
1206 } 1218 }
1207 1219
1208 *source_rates = default_rates; 1220 /* This depends on the fact that 5.4 is the last value in the array */
1221 if (!intel_dp_source_supports_hbr2(dev))
1222 size--;
1209 1223
1210 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) 1224 return size;
1211 /* WaDisableHBR2:skl */
1212 return (DP_LINK_BW_2_7 >> 3) + 1;
1213 else if (INTEL_INFO(dev)->gen >= 8 ||
1214 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1215 return (DP_LINK_BW_5_4 >> 3) + 1;
1216 else
1217 return (DP_LINK_BW_2_7 >> 3) + 1;
1218} 1225}
1219 1226
1220static void 1227static void
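The size-- above is safe only because every source rate array lists 540000 last, i.e. HBR2 (5.4 Gbps, stored as link clock in kHz); dropping the final entry is therefore exactly "remove HBR2". A self-contained illustration:

	#include <stdio.h>

	static const int rates[] = { 162000, 216000, 270000,
				     324000, 432000, 540000 };

	int main(void)
	{
		int n = sizeof(rates) / sizeof(rates[0]);
		int hbr2 = 0;	/* e.g. SKL < B0 with WaDisableHBR2 */

		if (!hbr2)
			n--;	/* works only if 540000 is the last entry */
		printf("max rate: %d kHz\n", rates[n - 1]);
		return 0;
	}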
@@ -3993,9 +4000,14 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3993 } 4000 }
3994 } 4001 }
3995 4002
3996 /* Training Pattern 3 support, both source and sink */ 4003 /* Training Pattern 3 support: only Intel platforms that support HBR2
4004 * also support TP3, hence that check is used along with the DPCD
4005 * check to ensure TP3 can be enabled.
4006 * SKL < B0 is the only exception, where TP3 is supported but still
4007 * not enabled, due to WaDisableHBR2.
4008 */
3997 if (drm_dp_tps3_supported(intel_dp->dpcd) && 4009 if (drm_dp_tps3_supported(intel_dp->dpcd) &&
3998 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) { 4010 intel_dp_source_supports_hbr2(dev)) {
3999 intel_dp->use_tps3 = true; 4011 intel_dp->use_tps3 = true;
4000 DRM_DEBUG_KMS("Displayport TPS3 supported\n"); 4012 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
4001 } else 4013 } else
@@ -5166,9 +5178,12 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5166 5178
5167 intel_dp_probe_oui(intel_dp); 5179 intel_dp_probe_oui(intel_dp);
5168 5180
5169 if (!intel_dp_probe_mst(intel_dp)) 5181 if (!intel_dp_probe_mst(intel_dp)) {
5182 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5183 intel_dp_check_link_status(intel_dp);
5184 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5170 goto mst_fail; 5185 goto mst_fail;
5171 5186 }
5172 } else { 5187 } else {
5173 if (intel_dp->is_mst) { 5188 if (intel_dp->is_mst) {
5174 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) 5189 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
@@ -5176,10 +5191,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5176 } 5191 }
5177 5192
5178 if (!intel_dp->is_mst) { 5193 if (!intel_dp->is_mst) {
5179 /*
5180 * we'll check the link status via the normal hot plug path later -
5181 * but for short hpds we should check it now
5182 */
5183 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 5194 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5184 intel_dp_check_link_status(intel_dp); 5195 intel_dp_check_link_status(intel_dp);
5185 drm_modeset_unlock(&dev->mode_config.connection_mutex); 5196 drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -5221,16 +5232,17 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
5221 return -1; 5232 return -1;
5222} 5233}
5223 5234
5224/* check the VBT to see whether the eDP is on DP-D port */ 5235/* check the VBT to see whether the eDP is on another port */
5225bool intel_dp_is_edp(struct drm_device *dev, enum port port) 5236bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5226{ 5237{
5227 struct drm_i915_private *dev_priv = dev->dev_private; 5238 struct drm_i915_private *dev_priv = dev->dev_private;
5228 union child_device_config *p_child; 5239 union child_device_config *p_child;
5229 int i; 5240 int i;
5230 static const short port_mapping[] = { 5241 static const short port_mapping[] = {
5231 [PORT_B] = PORT_IDPB, 5242 [PORT_B] = DVO_PORT_DPB,
5232 [PORT_C] = PORT_IDPC, 5243 [PORT_C] = DVO_PORT_DPC,
5233 [PORT_D] = PORT_IDPD, 5244 [PORT_D] = DVO_PORT_DPD,
5245 [PORT_E] = DVO_PORT_DPE,
5234 }; 5246 };
5235 5247
5236 if (port == PORT_A) 5248 if (port == PORT_A)
@@ -6067,6 +6079,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6067 case PORT_D: 6079 case PORT_D:
6068 intel_encoder->hpd_pin = HPD_PORT_D; 6080 intel_encoder->hpd_pin = HPD_PORT_D;
6069 break; 6081 break;
6082 case PORT_E:
6083 intel_encoder->hpd_pin = HPD_PORT_E;
6084 break;
6070 default: 6085 default:
6071 BUG(); 6086 BUG();
6072 } 6087 }
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index dd291d123219..677d70e4d363 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -33,6 +33,7 @@
33static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, 33static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
34 struct intel_crtc_state *pipe_config) 34 struct intel_crtc_state *pipe_config)
35{ 35{
36 struct drm_device *dev = encoder->base.dev;
36 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); 37 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
37 struct intel_digital_port *intel_dig_port = intel_mst->primary; 38 struct intel_digital_port *intel_dig_port = intel_mst->primary;
38 struct intel_dp *intel_dp = &intel_dig_port->dp; 39 struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -88,6 +89,10 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
88 &pipe_config->dp_m_n); 89 &pipe_config->dp_m_n);
89 90
90 pipe_config->dp_m_n.tu = slots; 91 pipe_config->dp_m_n.tu = slots;
92
93 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
94 hsw_dp_set_ddi_pll_sel(pipe_config);
95
91 return true; 96 return true;
92 97
93} 98}
@@ -403,7 +408,7 @@ static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
403 408
404static void intel_connector_add_to_fbdev(struct intel_connector *connector) 409static void intel_connector_add_to_fbdev(struct intel_connector *connector)
405{ 410{
406#ifdef CONFIG_DRM_I915_FBDEV 411#ifdef CONFIG_DRM_FBDEV_EMULATION
407 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 412 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
408 drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base); 413 drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
409#endif 414#endif
@@ -411,7 +416,7 @@ static void intel_connector_add_to_fbdev(struct intel_connector *connector)
411 416
412static void intel_connector_remove_from_fbdev(struct intel_connector *connector) 417static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
413{ 418{
414#ifdef CONFIG_DRM_I915_FBDEV 419#ifdef CONFIG_DRM_FBDEV_EMULATION
415 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 420 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
416 drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base); 421 drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
417#endif 422#endif
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 090d67b04307..40e825d6a26f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1212,6 +1212,7 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp);
1212void intel_edp_drrs_invalidate(struct drm_device *dev, 1212void intel_edp_drrs_invalidate(struct drm_device *dev,
1213 unsigned frontbuffer_bits); 1213 unsigned frontbuffer_bits);
1214void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits); 1214void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
1215void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
1215 1216
1216/* intel_dp_mst.c */ 1217/* intel_dp_mst.c */
1217int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); 1218int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
@@ -1225,7 +1226,7 @@ void intel_dvo_init(struct drm_device *dev);
1225 1226
1226 1227
1227/* legacy fbdev emulation in intel_fbdev.c */ 1228/* legacy fbdev emulation in intel_fbdev.c */
1228#ifdef CONFIG_DRM_I915_FBDEV 1229#ifdef CONFIG_DRM_FBDEV_EMULATION
1229extern int intel_fbdev_init(struct drm_device *dev); 1230extern int intel_fbdev_init(struct drm_device *dev);
1230extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie); 1231extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
1231extern void intel_fbdev_fini(struct drm_device *dev); 1232extern void intel_fbdev_fini(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6c9351b2e3af..96476d7d7ed2 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -55,13 +55,6 @@ static int intel_fbdev_set_par(struct fb_info *info)
55 ret = drm_fb_helper_set_par(info); 55 ret = drm_fb_helper_set_par(info);
56 56
57 if (ret == 0) { 57 if (ret == 0) {
58 /*
59 * FIXME: fbdev presumes that all callbacks also work from
60 * atomic contexts and relies on that for emergency oops
61 * printing. KMS totally doesn't do that and the locking here is
62 * by far not the only place this goes wrong. Ignore this for
63 * now until we solve this for real.
64 */
65 mutex_lock(&fb_helper->dev->struct_mutex); 58 mutex_lock(&fb_helper->dev->struct_mutex);
66 intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); 59 intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
67 mutex_unlock(&fb_helper->dev->struct_mutex); 60 mutex_unlock(&fb_helper->dev->struct_mutex);
@@ -80,13 +73,6 @@ static int intel_fbdev_blank(int blank, struct fb_info *info)
80 ret = drm_fb_helper_blank(blank, info); 73 ret = drm_fb_helper_blank(blank, info);
81 74
82 if (ret == 0) { 75 if (ret == 0) {
83 /*
84 * FIXME: fbdev presumes that all callbacks also work from
85 * atomic contexts and relies on that for emergency oops
86 * printing. KMS totally doesn't do that and the locking here is
87 * by far not the only place this goes wrong. Ignore this for
88 * now until we solve this for real.
89 */
90 mutex_lock(&fb_helper->dev->struct_mutex); 76 mutex_lock(&fb_helper->dev->struct_mutex);
91 intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); 77 intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
92 mutex_unlock(&fb_helper->dev->struct_mutex); 78 mutex_unlock(&fb_helper->dev->struct_mutex);
@@ -106,13 +92,6 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
106 ret = drm_fb_helper_pan_display(var, info); 92 ret = drm_fb_helper_pan_display(var, info);
107 93
108 if (ret == 0) { 94 if (ret == 0) {
109 /*
110 * FIXME: fbdev presumes that all callbacks also work from
111 * atomic contexts and relies on that for emergency oops
112 * printing. KMS totally doesn't do that and the locking here is
113 * by far not the only place this goes wrong. Ignore this for
114 * now until we solve this for real.
115 */
116 mutex_lock(&fb_helper->dev->struct_mutex); 95 mutex_lock(&fb_helper->dev->struct_mutex);
117 intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); 96 intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
118 mutex_unlock(&fb_helper->dev->struct_mutex); 97 mutex_unlock(&fb_helper->dev->struct_mutex);
@@ -125,9 +104,9 @@ static struct fb_ops intelfb_ops = {
125 .owner = THIS_MODULE, 104 .owner = THIS_MODULE,
126 .fb_check_var = drm_fb_helper_check_var, 105 .fb_check_var = drm_fb_helper_check_var,
127 .fb_set_par = intel_fbdev_set_par, 106 .fb_set_par = intel_fbdev_set_par,
128 .fb_fillrect = cfb_fillrect, 107 .fb_fillrect = drm_fb_helper_cfb_fillrect,
129 .fb_copyarea = cfb_copyarea, 108 .fb_copyarea = drm_fb_helper_cfb_copyarea,
130 .fb_imageblit = cfb_imageblit, 109 .fb_imageblit = drm_fb_helper_cfb_imageblit,
131 .fb_pan_display = intel_fbdev_pan_display, 110 .fb_pan_display = intel_fbdev_pan_display,
132 .fb_blank = intel_fbdev_blank, 111 .fb_blank = intel_fbdev_blank,
133 .fb_setcmap = drm_fb_helper_setcmap, 112 .fb_setcmap = drm_fb_helper_setcmap,
@@ -236,9 +215,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
236 obj = intel_fb->obj; 215 obj = intel_fb->obj;
237 size = obj->base.size; 216 size = obj->base.size;
238 217
239 info = framebuffer_alloc(0, &dev->pdev->dev); 218 info = drm_fb_helper_alloc_fbi(helper);
240 if (!info) { 219 if (IS_ERR(info)) {
241 ret = -ENOMEM; 220 ret = PTR_ERR(info);
242 goto out_unpin; 221 goto out_unpin;
243 } 222 }
244 223
@@ -247,24 +226,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
247 fb = &ifbdev->fb->base; 226 fb = &ifbdev->fb->base;
248 227
249 ifbdev->helper.fb = fb; 228 ifbdev->helper.fb = fb;
250 ifbdev->helper.fbdev = info;
251 229
252 strcpy(info->fix.id, "inteldrmfb"); 230 strcpy(info->fix.id, "inteldrmfb");
253 231
254 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; 232 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
255 info->fbops = &intelfb_ops; 233 info->fbops = &intelfb_ops;
256 234
257 ret = fb_alloc_cmap(&info->cmap, 256, 0);
258 if (ret) {
259 ret = -ENOMEM;
260 goto out_unpin;
261 }
262 /* setup aperture base/size for vesafb takeover */ 235 /* setup aperture base/size for vesafb takeover */
263 info->apertures = alloc_apertures(1);
264 if (!info->apertures) {
265 ret = -ENOMEM;
266 goto out_unpin;
267 }
268 info->apertures->ranges[0].base = dev->mode_config.fb_base; 236 info->apertures->ranges[0].base = dev->mode_config.fb_base;
269 info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; 237 info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
270 238
@@ -276,7 +244,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
276 size); 244 size);
277 if (!info->screen_base) { 245 if (!info->screen_base) {
278 ret = -ENOSPC; 246 ret = -ENOSPC;
279 goto out_unpin; 247 goto out_destroy_fbi;
280 } 248 }
281 info->screen_size = size; 249 info->screen_size = size;
282 250
@@ -303,6 +271,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
303 vga_switcheroo_client_fb_set(dev->pdev, info); 271 vga_switcheroo_client_fb_set(dev->pdev, info);
304 return 0; 272 return 0;
305 273
274out_destroy_fbi:
275 drm_fb_helper_release_fbi(helper);
306out_unpin: 276out_unpin:
307 i915_gem_object_ggtt_unpin(obj); 277 i915_gem_object_ggtt_unpin(obj);
308 drm_gem_object_unreference(&obj->base); 278 drm_gem_object_unreference(&obj->base);
@@ -544,16 +514,9 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
544static void intel_fbdev_destroy(struct drm_device *dev, 514static void intel_fbdev_destroy(struct drm_device *dev,
545 struct intel_fbdev *ifbdev) 515 struct intel_fbdev *ifbdev)
546{ 516{
547 if (ifbdev->helper.fbdev) {
548 struct fb_info *info = ifbdev->helper.fbdev;
549 517
550 unregister_framebuffer(info); 518 drm_fb_helper_unregister_fbi(&ifbdev->helper);
551 iounmap(info->screen_base); 519 drm_fb_helper_release_fbi(&ifbdev->helper);
552 if (info->cmap.len)
553 fb_dealloc_cmap(&info->cmap);
554
555 framebuffer_release(info);
556 }
557 520
558 drm_fb_helper_fini(&ifbdev->helper); 521 drm_fb_helper_fini(&ifbdev->helper);
559 522
@@ -802,7 +765,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
802 if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen) 765 if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
803 memset_io(info->screen_base, 0, info->screen_size); 766 memset_io(info->screen_base, 0, info->screen_size);
804 767
805 fb_set_suspend(info, state); 768 drm_fb_helper_set_suspend(&ifbdev->helper, state);
806 console_unlock(); 769 console_unlock();
807} 770}
808 771
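The intel_fbdev.c conversion above (and the mgag200 hunks below) follow the same drm_fb_helper recipe: drm_fb_helper_alloc_fbi() replaces framebuffer_alloc(), fb_alloc_cmap() and alloc_apertures(), and teardown pairs unregister_fbi()/release_fbi(). Sketched with error paths trimmed; the function names are illustrative:

	/* Sketch of the new fbdev helper lifecycle. */
	static int example_fb_probe(struct drm_fb_helper *helper)
	{
		struct fb_info *info = drm_fb_helper_alloc_fbi(helper);

		if (IS_ERR(info))
			return PTR_ERR(info);
		/* fill info->fbops, fix/var, apertures, screen_base ... */
		return 0; /* on later failure: drm_fb_helper_release_fbi() */
	}

	static void example_fb_destroy(struct drm_fb_helper *helper)
	{
		drm_fb_helper_unregister_fbi(helper);
		drm_fb_helper_release_fbi(helper);
		drm_fb_helper_fini(helper);
	}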
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4da737ccf69a..feb31d891482 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -2020,6 +2020,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
2020 struct drm_device *dev = intel_encoder->base.dev; 2020 struct drm_device *dev = intel_encoder->base.dev;
2021 struct drm_i915_private *dev_priv = dev->dev_private; 2021 struct drm_i915_private *dev_priv = dev->dev_private;
2022 enum port port = intel_dig_port->port; 2022 enum port port = intel_dig_port->port;
2023 uint8_t alternate_ddc_pin;
2023 2024
2024 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, 2025 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
2025 DRM_MODE_CONNECTOR_HDMIA); 2026 DRM_MODE_CONNECTOR_HDMIA);
@@ -2060,6 +2061,26 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
2060 intel_hdmi->ddc_bus = GMBUS_PIN_DPD; 2061 intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
2061 intel_encoder->hpd_pin = HPD_PORT_D; 2062 intel_encoder->hpd_pin = HPD_PORT_D;
2062 break; 2063 break;
2064 case PORT_E:
2065 /* On SKL, PORT E doesn't have a separate GMBUS pin.
2066 * We rely on VBT to set a proper alternate GMBUS pin. */
2067 alternate_ddc_pin =
2068 dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
2069 switch (alternate_ddc_pin) {
2070 case DDC_PIN_B:
2071 intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
2072 break;
2073 case DDC_PIN_C:
2074 intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
2075 break;
2076 case DDC_PIN_D:
2077 intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
2078 break;
2079 default:
2080 MISSING_CASE(alternate_ddc_pin);
2081 }
2082 intel_encoder->hpd_pin = HPD_PORT_E;
2083 break;
2063 case PORT_A: 2084 case PORT_A:
2064 intel_encoder->hpd_pin = HPD_PORT_A; 2085 intel_encoder->hpd_pin = HPD_PORT_A;
2065 /* Internal port only for eDP. */ 2086 /* Internal port only for eDP. */
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 032a0bf75f3b..53c0173a39fe 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -91,6 +91,9 @@ bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
91 case HPD_PORT_D: 91 case HPD_PORT_D:
92 *port = PORT_D; 92 *port = PORT_D;
93 return true; 93 return true;
94 case HPD_PORT_E:
95 *port = PORT_E;
96 return true;
94 default: 97 default:
95 return false; /* no hpd */ 98 return false; /* no hpd */
96 } 99 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e9520afc2033..40cbba4ea4ba 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1027,6 +1027,8 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
1027 if (ret) 1027 if (ret)
1028 goto unpin_ctx_obj; 1028 goto unpin_ctx_obj;
1029 1029
1030 ctx_obj->dirty = true;
1031
1030 /* Invalidate GuC TLB. */ 1032 /* Invalidate GuC TLB. */
1031 if (i915.enable_guc_submission) 1033 if (i915.enable_guc_submission)
1032 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); 1034 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index b1bd25e1e853..3f682a1a08ce 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -297,6 +297,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
297 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ 297 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
298 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ 298 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
299 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ 299 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
300 BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \
300 BIT(POWER_DOMAIN_AUX_B) | \ 301 BIT(POWER_DOMAIN_AUX_B) | \
301 BIT(POWER_DOMAIN_AUX_C) | \ 302 BIT(POWER_DOMAIN_AUX_C) | \
302 BIT(POWER_DOMAIN_AUX_D) | \ 303 BIT(POWER_DOMAIN_AUX_D) | \
@@ -316,6 +317,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
316#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \ 317#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
317 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ 318 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
318 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ 319 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
320 BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \
319 BIT(POWER_DOMAIN_INIT)) 321 BIT(POWER_DOMAIN_INIT))
320#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \ 322#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
321 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 323 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 9f9780b7ddf0..4f2068fe5d88 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -70,18 +70,22 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
70 BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev); 70 BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
71 BUG_ON(pixels_current == pixels_prev); 71 BUG_ON(pixels_current == pixels_prev);
72 72
73 obj = drm_gem_object_lookup(dev, file_priv, handle);
74 if (!obj)
75 return -ENOENT;
76
73 ret = mgag200_bo_reserve(pixels_1, true); 77 ret = mgag200_bo_reserve(pixels_1, true);
74 if (ret) { 78 if (ret) {
75 WREG8(MGA_CURPOSXL, 0); 79 WREG8(MGA_CURPOSXL, 0);
76 WREG8(MGA_CURPOSXH, 0); 80 WREG8(MGA_CURPOSXH, 0);
77 return ret; 81 goto out_unref;
78 } 82 }
79 ret = mgag200_bo_reserve(pixels_2, true); 83 ret = mgag200_bo_reserve(pixels_2, true);
80 if (ret) { 84 if (ret) {
81 WREG8(MGA_CURPOSXL, 0); 85 WREG8(MGA_CURPOSXL, 0);
82 WREG8(MGA_CURPOSXH, 0); 86 WREG8(MGA_CURPOSXH, 0);
83 mgag200_bo_unreserve(pixels_1); 87 mgag200_bo_unreserve(pixels_1);
84 return ret; 88 goto out_unreserve1;
85 } 89 }
86 90
87 if (!handle) { 91 if (!handle) {
@@ -106,16 +110,6 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
106 } 110 }
107 } 111 }
108 112
109 mutex_lock(&dev->struct_mutex);
110 obj = drm_gem_object_lookup(dev, file_priv, handle);
111 if (!obj) {
112 mutex_unlock(&dev->struct_mutex);
113 ret = -ENOENT;
114 goto out1;
115 }
116 drm_gem_object_unreference(obj);
117 mutex_unlock(&dev->struct_mutex);
118
119 bo = gem_to_mga_bo(obj); 113 bo = gem_to_mga_bo(obj);
120 ret = mgag200_bo_reserve(bo, true); 114 ret = mgag200_bo_reserve(bo, true);
121 if (ret) { 115 if (ret) {
@@ -252,7 +246,11 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
252 if (ret) 246 if (ret)
253 mga_hide_cursor(mdev); 247 mga_hide_cursor(mdev);
254 mgag200_bo_unreserve(pixels_1); 248 mgag200_bo_unreserve(pixels_1);
249out_unreserve1:
255 mgag200_bo_unreserve(pixels_2); 250 mgag200_bo_unreserve(pixels_2);
251out_unref:
252 drm_gem_object_unreference_unlocked(obj);
253
256 return ret; 254 return ret;
257} 255}
258 256
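Beyond plugging the reference leak, the cursor fix above shows the lock-free GEM reference pattern also used in mgag200_main.c below: look the object up before any BO reservations, with no dev->struct_mutex, and drop it with the _unlocked variant. In outline:

	/* Sketch: GEM reference handling without struct_mutex. */
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)
		return -ENOENT;
	/* ... reserve BOs, use gem_to_mga_bo(obj) ... */
	drm_gem_object_unreference_unlocked(obj);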
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 97745991544d..b0af77454d52 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -35,6 +35,7 @@ static const struct pci_device_id pciidlist[] = {
35 { PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB }, 35 { PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
36 { PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH }, 36 { PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
37 { PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER }, 37 { PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
38 { PCI_VENDOR_ID_MATROX, 0x536, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EW3 },
38 {0,} 39 {0,}
39}; 40};
40 41
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index e9eea1d4e7c3..912151c36d59 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -180,6 +180,7 @@ enum mga_type {
180 G200_EV, 180 G200_EV,
181 G200_EH, 181 G200_EH,
182 G200_ER, 182 G200_ER,
183 G200_EW3,
183}; 184};
184 185
185#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B) 186#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 958cf3cf082d..87de15ea1f93 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -101,7 +101,7 @@ static void mga_fillrect(struct fb_info *info,
101 const struct fb_fillrect *rect) 101 const struct fb_fillrect *rect)
102{ 102{
103 struct mga_fbdev *mfbdev = info->par; 103 struct mga_fbdev *mfbdev = info->par;
104 sys_fillrect(info, rect); 104 drm_fb_helper_sys_fillrect(info, rect);
105 mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width, 105 mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
106 rect->height); 106 rect->height);
107} 107}
@@ -110,7 +110,7 @@ static void mga_copyarea(struct fb_info *info,
110 const struct fb_copyarea *area) 110 const struct fb_copyarea *area)
111{ 111{
112 struct mga_fbdev *mfbdev = info->par; 112 struct mga_fbdev *mfbdev = info->par;
113 sys_copyarea(info, area); 113 drm_fb_helper_sys_copyarea(info, area);
114 mga_dirty_update(mfbdev, area->dx, area->dy, area->width, 114 mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
115 area->height); 115 area->height);
116} 116}
@@ -119,7 +119,7 @@ static void mga_imageblit(struct fb_info *info,
119 const struct fb_image *image) 119 const struct fb_image *image)
120{ 120{
121 struct mga_fbdev *mfbdev = info->par; 121 struct mga_fbdev *mfbdev = info->par;
122 sys_imageblit(info, image); 122 drm_fb_helper_sys_imageblit(info, image);
123 mga_dirty_update(mfbdev, image->dx, image->dy, image->width, 123 mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
124 image->height); 124 image->height);
125} 125}
@@ -166,7 +166,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
166 struct fb_info *info; 166 struct fb_info *info;
167 struct drm_framebuffer *fb; 167 struct drm_framebuffer *fb;
168 struct drm_gem_object *gobj = NULL; 168 struct drm_gem_object *gobj = NULL;
169 struct device *device = &dev->pdev->dev;
170 int ret; 169 int ret;
171 void *sysram; 170 void *sysram;
172 int size; 171 int size;
@@ -189,9 +188,9 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
189 if (!sysram) 188 if (!sysram)
190 return -ENOMEM; 189 return -ENOMEM;
191 190
192 info = framebuffer_alloc(0, device); 191 info = drm_fb_helper_alloc_fbi(helper);
193 if (info == NULL) 192 if (IS_ERR(info))
194 return -ENOMEM; 193 return PTR_ERR(info);
195 194
196 info->par = mfbdev; 195 info->par = mfbdev;
197 196
@@ -206,14 +205,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
206 205
207 /* setup helper */ 206 /* setup helper */
208 mfbdev->helper.fb = fb; 207 mfbdev->helper.fb = fb;
209 mfbdev->helper.fbdev = info;
210
211 ret = fb_alloc_cmap(&info->cmap, 256, 0);
212 if (ret) {
213 DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
214 ret = -ENOMEM;
215 goto out;
216 }
217 208
218 strcpy(info->fix.id, "mgadrmfb"); 209 strcpy(info->fix.id, "mgadrmfb");
219 210
@@ -221,11 +212,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
221 info->fbops = &mgag200fb_ops; 212 info->fbops = &mgag200fb_ops;
222 213
223 /* setup aperture base/size for vesafb takeover */ 214 /* setup aperture base/size for vesafb takeover */
224 info->apertures = alloc_apertures(1);
225 if (!info->apertures) {
226 ret = -ENOMEM;
227 goto out;
228 }
229 info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base; 215 info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
230 info->apertures->ranges[0].size = mdev->mc.vram_size; 216 info->apertures->ranges[0].size = mdev->mc.vram_size;
231 217
@@ -240,24 +226,15 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
240 DRM_DEBUG_KMS("allocated %dx%d\n", 226 DRM_DEBUG_KMS("allocated %dx%d\n",
241 fb->width, fb->height); 227 fb->width, fb->height);
242 return 0; 228 return 0;
243out:
244 return ret;
245} 229}
246 230
247static int mga_fbdev_destroy(struct drm_device *dev, 231static int mga_fbdev_destroy(struct drm_device *dev,
248 struct mga_fbdev *mfbdev) 232 struct mga_fbdev *mfbdev)
249{ 233{
250 struct fb_info *info;
251 struct mga_framebuffer *mfb = &mfbdev->mfb; 234 struct mga_framebuffer *mfb = &mfbdev->mfb;
252 235
253 if (mfbdev->helper.fbdev) { 236 drm_fb_helper_unregister_fbi(&mfbdev->helper);
254 info = mfbdev->helper.fbdev; 237 drm_fb_helper_release_fbi(&mfbdev->helper);
255
256 unregister_framebuffer(info);
257 if (info->cmap.len)
258 fb_dealloc_cmap(&info->cmap);
259 framebuffer_release(info);
260 }
261 238
262 if (mfb->obj) { 239 if (mfb->obj) {
263 drm_gem_object_unreference_unlocked(mfb->obj); 240 drm_gem_object_unreference_unlocked(mfb->obj);
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index d3dcf54e6233..10535e3b75f2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -101,6 +101,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
101 case G200_SE_B: 101 case G200_SE_B:
102 case G200_EV: 102 case G200_EV:
103 case G200_WB: 103 case G200_WB:
104 case G200_EW3:
104 data = 1; 105 data = 1;
105 clock = 2; 106 clock = 2;
106 break; 107 break;
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index f6b283b8375e..de06388069e7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -82,12 +82,19 @@ static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
82 int orig; 82 int orig;
83 int test1, test2; 83 int test1, test2;
84 int orig1, orig2; 84 int orig1, orig2;
85 unsigned int vram_size;
85 86
86 /* Probe */ 87 /* Probe */
87 orig = ioread16(mem); 88 orig = ioread16(mem);
88 iowrite16(0, mem); 89 iowrite16(0, mem);
89 90
90 for (offset = 0x100000; offset < mdev->mc.vram_window; offset += 0x4000) { 91 vram_size = mdev->mc.vram_window;
92
93 if ((mdev->type == G200_EW3) && (vram_size >= 0x1000000)) {
94 vram_size = vram_size - 0x400000;
95 }
96
97 for (offset = 0x100000; offset < vram_size; offset += 0x4000) {
91 orig1 = ioread8(mem + offset); 98 orig1 = ioread8(mem + offset);
92 orig2 = ioread8(mem + offset + 0x100); 99 orig2 = ioread8(mem + offset + 0x100);
93 100
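
The hunk above grows mga_probe_vram() an EW3 special case: when at least 16 MiB is present, the probed window shrinks by 4 MiB, since the top of the aperture is reserved on that chip. The probe itself is unchanged: write markers at 16 KiB strides, read them back, and treat the first mismatch as the end of usable VRAM. A user-space sketch of that probe idea, with an ordinary heap buffer standing in for the iomapped aperture (buffer size and marker values are illustrative, not taken from the driver):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Probe how much of 'mem' (window bytes) answers to writes, in the
 * spirit of mga_probe_vram(): write two markers per step, read them
 * back, stop at the first mismatch. */
static size_t probe_size(volatile uint8_t *mem, size_t window, size_t step)
{
        size_t offset;

        for (offset = 0x100000; offset + 0x100 < window; offset += step) {
                uint8_t orig1 = mem[offset];
                uint8_t orig2 = mem[offset + 0x100];

                mem[offset] = 0x55;
                mem[offset + 0x100] = 0xaa;

                if (mem[offset] != 0x55 || mem[offset + 0x100] != 0xaa)
                        break;

                mem[offset] = orig1;        /* restore what was there */
                mem[offset + 0x100] = orig2;
        }
        return offset;
}

int main(void)
{
        size_t window = 16 << 20;        /* pretend 16 MiB aperture */
        uint8_t *mem = malloc(window);

        if (!mem)
                return 1;
        memset(mem, 0, window);
        printf("usable: %zu MiB\n", probe_size(mem, window, 0x4000) >> 20);
        free(mem);
        return 0;
}
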
@@ -345,23 +352,15 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
345 uint64_t *offset) 352 uint64_t *offset)
346{ 353{
347 struct drm_gem_object *obj; 354 struct drm_gem_object *obj;
348 int ret;
349 struct mgag200_bo *bo; 355 struct mgag200_bo *bo;
350 356
351 mutex_lock(&dev->struct_mutex);
352 obj = drm_gem_object_lookup(dev, file, handle); 357 obj = drm_gem_object_lookup(dev, file, handle);
353 if (obj == NULL) { 358 if (obj == NULL)
354 ret = -ENOENT; 359 return -ENOENT;
355 goto out_unlock;
356 }
357 360
358 bo = gem_to_mga_bo(obj); 361 bo = gem_to_mga_bo(obj);
359 *offset = mgag200_bo_mmap_offset(bo); 362 *offset = mgag200_bo_mmap_offset(bo);
360 363
361 drm_gem_object_unreference(obj); 364 drm_gem_object_unreference_unlocked(obj);
362 ret = 0; 365 return 0;
363out_unlock:
364 mutex_unlock(&dev->struct_mutex);
365 return ret;
366
367} 366}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index cd75cff096e1..c99d3fe12881 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -104,6 +104,8 @@ static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
104 return true; 104 return true;
105} 105}
106 106
107#define P_ARRAY_SIZE 9
108
107static int mga_g200se_set_plls(struct mga_device *mdev, long clock) 109static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
108{ 110{
109 unsigned int vcomax, vcomin, pllreffreq; 111 unsigned int vcomax, vcomin, pllreffreq;
@@ -111,37 +113,97 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
111 unsigned int testp, testm, testn; 113 unsigned int testp, testm, testn;
112 unsigned int p, m, n; 114 unsigned int p, m, n;
113 unsigned int computed; 115 unsigned int computed;
116 unsigned int pvalues_e4[P_ARRAY_SIZE] = {16, 14, 12, 10, 8, 6, 4, 2, 1};
117 unsigned int fvv;
118 unsigned int i;
119
120 if (mdev->unique_rev_id <= 0x03) {
121
122 m = n = p = 0;
123 vcomax = 320000;
124 vcomin = 160000;
125 pllreffreq = 25000;
126
127 delta = 0xffffffff;
128 permitteddelta = clock * 5 / 1000;
129
130 for (testp = 8; testp > 0; testp /= 2) {
131 if (clock * testp > vcomax)
132 continue;
133 if (clock * testp < vcomin)
134 continue;
135
136 for (testn = 17; testn < 256; testn++) {
137 for (testm = 1; testm < 32; testm++) {
138 computed = (pllreffreq * testn) /
139 (testm * testp);
140 if (computed > clock)
141 tmpdelta = computed - clock;
142 else
143 tmpdelta = clock - computed;
144 if (tmpdelta < delta) {
145 delta = tmpdelta;
146 m = testm - 1;
147 n = testn - 1;
148 p = testp - 1;
149 }
150 }
151 }
152 }
153 } else {
114 154
115 m = n = p = 0;
116 vcomax = 320000;
117 vcomin = 160000;
118 pllreffreq = 25000;
119 155
120 delta = 0xffffffff; 156 m = n = p = 0;
121 permitteddelta = clock * 5 / 1000; 157 vcomax = 1600000;
158 vcomin = 800000;
159 pllreffreq = 25000;
122 160
123 for (testp = 8; testp > 0; testp /= 2) { 161 if (clock < 25000)
124 if (clock * testp > vcomax) 162 clock = 25000;
125 continue;
126 if (clock * testp < vcomin)
127 continue;
128 163
129 for (testn = 17; testn < 256; testn++) { 164 clock = clock * 2;
130 for (testm = 1; testm < 32; testm++) { 165
131 computed = (pllreffreq * testn) / 166 delta = 0xFFFFFFFF;
132 (testm * testp); 167 /* Permitted delta is 0.5%, per the VESA specification */
133 if (computed > clock) 168 permitteddelta = clock * 5 / 1000;
134 tmpdelta = computed - clock; 169
135 else 170 for (i = 0 ; i < P_ARRAY_SIZE ; i++) {
136 tmpdelta = clock - computed; 171 testp = pvalues_e4[i];
137 if (tmpdelta < delta) { 172
138 delta = tmpdelta; 173 if ((clock * testp) > vcomax)
139 m = testm - 1; 174 continue;
140 n = testn - 1; 175 if ((clock * testp) < vcomin)
141 p = testp - 1; 176 continue;
177
178 for (testn = 50; testn <= 256; testn++) {
179 for (testm = 1; testm <= 32; testm++) {
180 computed = (pllreffreq * testn) /
181 (testm * testp);
182 if (computed > clock)
183 tmpdelta = computed - clock;
184 else
185 tmpdelta = clock - computed;
186
187 if (tmpdelta < delta) {
188 delta = tmpdelta;
189 m = testm - 1;
190 n = testn - 1;
191 p = testp - 1;
192 }
142 } 193 }
143 } 194 }
144 } 195 }
196
197 fvv = pllreffreq * testn / testm;
198 fvv = (fvv - 800000) / 50000;
199
200 if (fvv > 15)
201 fvv = 15;
202
203 p |= (fvv << 4);
204 m |= 0x80;
205
206 clock = clock / 2;
145 } 207 }
146 208
147 if (delta > permitteddelta) { 209 if (delta > permitteddelta) {
@@ -159,7 +221,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
159{ 221{
160 unsigned int vcomax, vcomin, pllreffreq; 222 unsigned int vcomax, vcomin, pllreffreq;
161 unsigned int delta, tmpdelta; 223 unsigned int delta, tmpdelta;
162 unsigned int testp, testm, testn; 224 unsigned int testp, testm, testn, testp2;
163 unsigned int p, m, n; 225 unsigned int p, m, n;
164 unsigned int computed; 226 unsigned int computed;
165 int i, j, tmpcount, vcount; 227 int i, j, tmpcount, vcount;
@@ -167,31 +229,71 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
167 u8 tmp; 229 u8 tmp;
168 230
169 m = n = p = 0; 231 m = n = p = 0;
170 vcomax = 550000;
171 vcomin = 150000;
172 pllreffreq = 48000;
173 232
174 delta = 0xffffffff; 233 delta = 0xffffffff;
175 234
176 for (testp = 1; testp < 9; testp++) { 235 if (mdev->type == G200_EW3) {
177 if (clock * testp > vcomax) 236
178 continue; 237 vcomax = 800000;
179 if (clock * testp < vcomin) 238 vcomin = 400000;
180 continue; 239 pllreffreq = 25000;
240
241 for (testp = 1; testp < 8; testp++) {
242 for (testp2 = 1; testp2 < 8; testp2++) {
243 if (testp < testp2)
244 continue;
245 if ((clock * testp * testp2) > vcomax)
246 continue;
247 if ((clock * testp * testp2) < vcomin)
248 continue;
249 for (testm = 1; testm < 26; testm++) {
250 for (testn = 32; testn < 2048 ; testn++) {
251 computed = (pllreffreq * testn) /
252 (testm * testp * testp2);
253 if (computed > clock)
254 tmpdelta = computed - clock;
255 else
256 tmpdelta = clock - computed;
257 if (tmpdelta < delta) {
258 delta = tmpdelta;
259 m = ((testn & 0x100) >> 1) |
260 (testm);
261 n = (testn & 0xFF);
262 p = ((testn & 0x600) >> 3) |
263 (testp2 << 3) |
264 (testp);
265 }
266 }
267 }
268 }
269 }
270 } else {
181 271
182 for (testm = 1; testm < 17; testm++) { 272 vcomax = 550000;
183 for (testn = 1; testn < 151; testn++) { 273 vcomin = 150000;
184 computed = (pllreffreq * testn) / 274 pllreffreq = 48000;
185 (testm * testp); 275
186 if (computed > clock) 276 for (testp = 1; testp < 9; testp++) {
187 tmpdelta = computed - clock; 277 if (clock * testp > vcomax)
188 else 278 continue;
189 tmpdelta = clock - computed; 279 if (clock * testp < vcomin)
190 if (tmpdelta < delta) { 280 continue;
191 delta = tmpdelta; 281
192 n = testn - 1; 282 for (testm = 1; testm < 17; testm++) {
193 m = (testm - 1) | ((n >> 1) & 0x80); 283 for (testn = 1; testn < 151; testn++) {
194 p = testp - 1; 284 computed = (pllreffreq * testn) /
285 (testm * testp);
286 if (computed > clock)
287 tmpdelta = computed - clock;
288 else
289 tmpdelta = clock - computed;
290 if (tmpdelta < delta) {
291 delta = tmpdelta;
292 n = testn - 1;
293 m = (testm - 1) |
294 ((n >> 1) & 0x80);
295 p = testp - 1;
296 }
195 } 297 }
196 } 298 }
197 } 299 }
@@ -569,6 +671,7 @@ static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
569 return mga_g200se_set_plls(mdev, clock); 671 return mga_g200se_set_plls(mdev, clock);
570 break; 672 break;
571 case G200_WB: 673 case G200_WB:
674 case G200_EW3:
572 return mga_g200wb_set_plls(mdev, clock); 675 return mga_g200wb_set_plls(mdev, clock);
573 break; 676 break;
574 case G200_EV: 677 case G200_EV:
@@ -820,6 +923,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
820 option2 = 0x00008000; 923 option2 = 0x00008000;
821 break; 924 break;
822 case G200_WB: 925 case G200_WB:
926 case G200_EW3:
823 dacvalue[MGA1064_VREF_CTL] = 0x07; 927 dacvalue[MGA1064_VREF_CTL] = 0x07;
824 option = 0x41049120; 928 option = 0x41049120;
825 option2 = 0x0000b000; 929 option2 = 0x0000b000;
@@ -875,7 +979,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
875 if (IS_G200_SE(mdev) && 979 if (IS_G200_SE(mdev) &&
876 ((i == 0x2c) || (i == 0x2d) || (i == 0x2e))) 980 ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)))
877 continue; 981 continue;
878 if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH) && 982 if ((mdev->type == G200_EV ||
983 mdev->type == G200_WB ||
984 mdev->type == G200_EH ||
985 mdev->type == G200_EW3) &&
879 (i >= 0x44) && (i <= 0x4e)) 986 (i >= 0x44) && (i <= 0x4e))
880 continue; 987 continue;
881 988
@@ -977,7 +1084,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
977 else 1084 else
978 ext_vga[3] = ((1 << bppshift) - 1) | 0x80; 1085 ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
979 ext_vga[4] = 0; 1086 ext_vga[4] = 0;
980 if (mdev->type == G200_WB) 1087 if (mdev->type == G200_WB || mdev->type == G200_EW3)
981 ext_vga[1] |= 0x88; 1088 ext_vga[1] |= 0x88;
982 1089
983 /* Set pixel clocks */ 1090 /* Set pixel clocks */
@@ -993,6 +1100,9 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
993 if (mdev->type == G200_ER) 1100 if (mdev->type == G200_ER)
994 WREG_ECRT(0x24, 0x5); 1101 WREG_ECRT(0x24, 0x5);
995 1102
1103 if (mdev->type == G200_EW3)
1104 WREG_ECRT(0x34, 0x5);
1105
996 if (mdev->type == G200_EV) { 1106 if (mdev->type == G200_EV) {
997 WREG_ECRT(6, 0); 1107 WREG_ECRT(6, 0);
998 } 1108 }
@@ -1205,7 +1315,7 @@ static void mga_crtc_prepare(struct drm_crtc *crtc)
1205 WREG_SEQ(1, tmp | 0x20); 1315 WREG_SEQ(1, tmp | 0x20);
1206 } 1316 }
1207 1317
1208 if (mdev->type == G200_WB) 1318 if (mdev->type == G200_WB || mdev->type == G200_EW3)
1209 mga_g200wb_prepare(crtc); 1319 mga_g200wb_prepare(crtc);
1210 1320
1211 WREG_CRT(17, 0); 1321 WREG_CRT(17, 0);
@@ -1222,7 +1332,7 @@ static void mga_crtc_commit(struct drm_crtc *crtc)
1222 const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 1332 const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1223 u8 tmp; 1333 u8 tmp;
1224 1334
1225 if (mdev->type == G200_WB) 1335 if (mdev->type == G200_WB || mdev->type == G200_EW3)
1226 mga_g200wb_commit(crtc); 1336 mga_g200wb_commit(crtc);
1227 1337
1228 if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) { 1338 if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
@@ -1492,7 +1602,7 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
1492 if (mga_vga_calculate_mode_bandwidth(mode, bpp) 1602 if (mga_vga_calculate_mode_bandwidth(mode, bpp)
1493 > (24400 * 1024)) 1603 > (24400 * 1024))
1494 return MODE_BANDWIDTH; 1604 return MODE_BANDWIDTH;
1495 } else if (mdev->unique_rev_id >= 0x02) { 1605 } else if (mdev->unique_rev_id == 0x02) {
1496 if (mode->hdisplay > 1920) 1606 if (mode->hdisplay > 1920)
1497 return MODE_VIRTUAL_X; 1607 return MODE_VIRTUAL_X;
1498 if (mode->vdisplay > 1200) 1608 if (mode->vdisplay > 1200)
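
Both mga_g200se_set_plls() and mga_g200wb_set_plls() above are instances of one algorithm: brute-force the divider triple (m, n, p) that brings pllreffreq * n / (m * p) closest to the requested clock while keeping the VCO frequency clock * p inside its legal window. A standalone sketch of the search, using the G200SE rev <= 0x03 constants visible in the hunk (the 65000 kHz test clock is just an example):

#include <stdio.h>

/* Exhaustive m/n/p search as in mga_g200se_set_plls(): minimize
 * |pllreffreq * n / (m * p) - clock| subject to
 * vcomin <= clock * p <= vcomax. All frequencies in kHz. */
static int find_pll(long clock, unsigned *m, unsigned *n, unsigned *p)
{
        const unsigned vcomax = 320000, vcomin = 160000, pllreffreq = 25000;
        unsigned delta = 0xffffffff;
        unsigned permitteddelta = clock * 5 / 1000;        /* 0.5% */
        unsigned testp, testn, testm;

        for (testp = 8; testp > 0; testp /= 2) {
                if (clock * testp > vcomax || clock * testp < vcomin)
                        continue;
                for (testn = 17; testn < 256; testn++) {
                        for (testm = 1; testm < 32; testm++) {
                                unsigned computed =
                                        pllreffreq * testn / (testm * testp);
                                unsigned tmpdelta = computed > clock ?
                                        computed - clock : clock - computed;
                                if (tmpdelta < delta) {
                                        delta = tmpdelta;
                                        *m = testm - 1;        /* registers hold value-1 */
                                        *n = testn - 1;
                                        *p = testp - 1;
                                }
                        }
                }
        }
        return delta <= permitteddelta ? 0 : -1;
}

int main(void)
{
        unsigned m = 0, n = 0, p = 0;
        long clock = 65000;        /* e.g. 1024x768@60 pixel clock, kHz */

        if (find_pll(clock, &m, &n, &p) == 0)
                printf("clock %ld kHz -> m=%u n=%u p=%u\n", clock, m, n, p);
        return 0;
}

The driver variants differ only in the constants, the loop bounds, and how the winning dividers are packed into register fields (the EW3 branch adds a second post-divider, testp2).
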
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 08ba8d0d93f5..8e6c7c638e24 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -9,6 +9,7 @@ config DRM_MSM
9 select DRM_PANEL 9 select DRM_PANEL
10 select SHMEM 10 select SHMEM
11 select TMPFS 11 select TMPFS
12 select QCOM_SCM
12 default y 13 default y
13 help 14 help
14 DRM/KMS driver for MSM/snapdragon. 15 DRM/KMS driver for MSM/snapdragon.
@@ -53,3 +54,17 @@ config DRM_MSM_DSI_PLL
53 help 54 help
54 Choose this option to enable the DSI PLL driver, which provides DSI 55 Choose this option to enable the DSI PLL driver, which provides DSI
55 source clocks under the common clock framework. 56 source clocks under the common clock framework.
57
58config DRM_MSM_DSI_28NM_PHY
59 bool "Enable DSI 28nm PHY driver in MSM DRM"
60 depends on DRM_MSM_DSI
61 default y
62 help
63 Choose this option if the 28nm DSI PHY is used on the platform.
64
65config DRM_MSM_DSI_20NM_PHY
66 bool "Enable DSI 20nm PHY driver in MSM DRM"
67 depends on DRM_MSM_DSI
68 default y
69 help
70 Choose this option if the 20nm DSI PHY is used on the platform.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 16a81b94d6f0..0a543eb5e5d7 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,5 +1,5 @@
1ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm 1ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
2ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi 2ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
3 3
4msm-y := \ 4msm-y := \
5 adreno/adreno_device.o \ 5 adreno/adreno_device.o \
@@ -10,6 +10,7 @@ msm-y := \
10 hdmi/hdmi_audio.o \ 10 hdmi/hdmi_audio.o \
11 hdmi/hdmi_bridge.o \ 11 hdmi/hdmi_bridge.o \
12 hdmi/hdmi_connector.o \ 12 hdmi/hdmi_connector.o \
13 hdmi/hdmi_hdcp.o \
13 hdmi/hdmi_i2c.o \ 14 hdmi/hdmi_i2c.o \
14 hdmi/hdmi_phy_8960.o \ 15 hdmi/hdmi_phy_8960.o \
15 hdmi/hdmi_phy_8x60.o \ 16 hdmi/hdmi_phy_8x60.o \
@@ -53,12 +54,18 @@ msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
53msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o 54msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
54 55
55msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ 56msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
57 dsi/dsi_cfg.o \
56 dsi/dsi_host.o \ 58 dsi/dsi_host.o \
57 dsi/dsi_manager.o \ 59 dsi/dsi_manager.o \
58 dsi/dsi_phy.o \ 60 dsi/phy/dsi_phy.o \
59 mdp/mdp5/mdp5_cmd_encoder.o 61 mdp/mdp5/mdp5_cmd_encoder.o
60 62
61msm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \ 63msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
62 dsi/pll/dsi_pll_28nm.o 64msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
65
66ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
67msm-y += dsi/pll/dsi_pll.o
68msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
69endif
63 70
64obj-$(CONFIG_DRM_MSM) += msm.o 71obj-$(CONFIG_DRM_MSM) += msm.o
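
A Kconfig bool such as the new DRM_MSM_DSI_28NM_PHY surfaces in two places: as the msm-$(CONFIG_...) object guards in this Makefile, and as a CONFIG_* preprocessor macro that C code can test. A trivial sketch of the C side; the #define is faked here so the snippet stands alone:

#include <stdio.h>

/* With DRM_MSM_DSI_28NM_PHY=y, kbuild both compiles dsi_phy_28nm.o and
 * defines CONFIG_DRM_MSM_DSI_28NM_PHY for the preprocessor. Faked here
 * to keep the sketch self-contained. */
#define CONFIG_DRM_MSM_DSI_28NM_PHY 1

int main(void)
{
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
        printf("28nm PHY support built in\n");
#else
        printf("28nm PHY support disabled\n");
#endif
        return 0;
}
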
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 23176e402796..0261f0d31612 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,15 +8,15 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06)
18 18
19Copyright (C) 2013-2014 by the following authors: 19Copyright (C) 2013-2015 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 20- Rob Clark <robdclark@gmail.com> (robclark)
21 21
22Permission is hereby granted, free of charge, to any person obtaining 22Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 1c599e5cf318..48d133711487 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,13 +8,13 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06)
18 18
19Copyright (C) 2013-2015 by the following authors: 19Copyright (C) 2013-2015 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 20- Rob Clark <robdclark@gmail.com> (robclark)
@@ -326,6 +326,13 @@ enum a3xx_tex_type {
326 A3XX_TEX_3D = 3, 326 A3XX_TEX_3D = 3,
327}; 327};
328 328
329enum a3xx_tex_msaa {
330 A3XX_TPL1_MSAA1X = 0,
331 A3XX_TPL1_MSAA2X = 1,
332 A3XX_TPL1_MSAA4X = 2,
333 A3XX_TPL1_MSAA8X = 3,
334};
335
329#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001 336#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001
330#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002 337#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002
331#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004 338#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004
@@ -2652,6 +2659,7 @@ static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
2652#define REG_A3XX_VGT_IMMED_DATA 0x000021fd 2659#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
2653 2660
2654#define REG_A3XX_TEX_SAMP_0 0x00000000 2661#define REG_A3XX_TEX_SAMP_0 0x00000000
2662#define A3XX_TEX_SAMP_0_CLAMPENABLE 0x00000001
2655#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002 2663#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002
2656#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c 2664#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
2657#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2 2665#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
@@ -2695,6 +2703,7 @@ static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val
2695{ 2703{
2696 return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK; 2704 return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK;
2697} 2705}
2706#define A3XX_TEX_SAMP_0_CUBEMAPSEAMLESSFILTOFF 0x01000000
2698#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000 2707#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
2699 2708
2700#define REG_A3XX_TEX_SAMP_1 0x00000001 2709#define REG_A3XX_TEX_SAMP_1 0x00000001
@@ -2750,6 +2759,12 @@ static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val)
2750{ 2759{
2751 return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK; 2760 return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK;
2752} 2761}
2762#define A3XX_TEX_CONST_0_MSAATEX__MASK 0x00300000
2763#define A3XX_TEX_CONST_0_MSAATEX__SHIFT 20
2764static inline uint32_t A3XX_TEX_CONST_0_MSAATEX(enum a3xx_tex_msaa val)
2765{
2766 return ((val) << A3XX_TEX_CONST_0_MSAATEX__SHIFT) & A3XX_TEX_CONST_0_MSAATEX__MASK;
2767}
2753#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000 2768#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
2754#define A3XX_TEX_CONST_0_FMT__SHIFT 22 2769#define A3XX_TEX_CONST_0_FMT__SHIFT 22
2755static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val) 2770static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
@@ -2785,7 +2800,7 @@ static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val)
2785} 2800}
2786 2801
2787#define REG_A3XX_TEX_CONST_2 0x00000002 2802#define REG_A3XX_TEX_CONST_2 0x00000002
2788#define A3XX_TEX_CONST_2_INDX__MASK 0x000000ff 2803#define A3XX_TEX_CONST_2_INDX__MASK 0x000001ff
2789#define A3XX_TEX_CONST_2_INDX__SHIFT 0 2804#define A3XX_TEX_CONST_2_INDX__SHIFT 0
2790static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val) 2805static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
2791{ 2806{
@@ -2805,7 +2820,7 @@ static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
2805} 2820}
2806 2821
2807#define REG_A3XX_TEX_CONST_3 0x00000003 2822#define REG_A3XX_TEX_CONST_3 0x00000003
2808#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x00007fff 2823#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0001ffff
2809#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0 2824#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0
2810static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val) 2825static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
2811{ 2826{
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 3f06ecf62583..ac55066db3b0 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,13 +8,13 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06)
18 18
19Copyright (C) 2013-2015 by the following authors: 19Copyright (C) 2013-2015 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 20- Rob Clark <robdclark@gmail.com> (robclark)
@@ -227,6 +227,7 @@ enum a4xx_depth_format {
227 DEPTH4_NONE = 0, 227 DEPTH4_NONE = 0,
228 DEPTH4_16 = 1, 228 DEPTH4_16 = 1,
229 DEPTH4_24_8 = 2, 229 DEPTH4_24_8 = 2,
230 DEPTH4_32 = 3,
230}; 231};
231 232
232enum a4xx_tess_spacing { 233enum a4xx_tess_spacing {
@@ -429,7 +430,7 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
429 return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK; 430 return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
430} 431}
431#define A4XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00002000 432#define A4XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00002000
432#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0x007fc000 433#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xffffc000
433#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14 434#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14
434static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val) 435static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
435{ 436{
@@ -439,7 +440,7 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
439static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; } 440static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; }
440 441
441static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; } 442static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; }
442#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x0001fff8 443#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x03fffff8
443#define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT 3 444#define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT 3
444static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val) 445static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val)
445{ 446{
@@ -570,6 +571,15 @@ static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
570 return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK; 571 return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK;
571} 572}
572 573
574#define REG_A4XX_RB_SAMPLE_COUNT_CONTROL 0x000020fa
575#define A4XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
576#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK 0xfffffffc
577#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT 2
578static inline uint32_t A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(uint32_t val)
579{
580 return ((val >> 2) << A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT) & A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK;
581}
582
573#define REG_A4XX_RB_RENDER_COMPONENTS 0x000020fb 583#define REG_A4XX_RB_RENDER_COMPONENTS 0x000020fb
574#define A4XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f 584#define A4XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
575#define A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0 585#define A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
@@ -811,6 +821,23 @@ static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
811#define REG_A4XX_RB_STENCIL_CONTROL2 0x00002107 821#define REG_A4XX_RB_STENCIL_CONTROL2 0x00002107
812#define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER 0x00000001 822#define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER 0x00000001
813 823
824#define REG_A4XX_RB_STENCIL_INFO 0x00002108
825#define A4XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
826#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff000
827#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 12
828static inline uint32_t A4XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
829{
830 return ((val >> 12) << A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
831}
832
833#define REG_A4XX_RB_STENCIL_PITCH 0x00002109
834#define A4XX_RB_STENCIL_PITCH__MASK 0xffffffff
835#define A4XX_RB_STENCIL_PITCH__SHIFT 0
836static inline uint32_t A4XX_RB_STENCIL_PITCH(uint32_t val)
837{
838 return ((val >> 5) << A4XX_RB_STENCIL_PITCH__SHIFT) & A4XX_RB_STENCIL_PITCH__MASK;
839}
840
814#define REG_A4XX_RB_STENCILREFMASK 0x0000210b 841#define REG_A4XX_RB_STENCILREFMASK 0x0000210b
815#define A4XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff 842#define A4XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
816#define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0 843#define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
@@ -1433,6 +1460,7 @@ static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val)
1433{ 1460{
1434 return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK; 1461 return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
1435} 1462}
1463#define A4XX_SP_FS_MRT_REG_COLOR_SRGB 0x00040000
1436 1464
1437#define REG_A4XX_SP_CS_CTRL_REG0 0x00002300 1465#define REG_A4XX_SP_CS_CTRL_REG0 0x00002300
1438 1466
@@ -1470,6 +1498,76 @@ static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1470 1498
1471#define REG_A4XX_SP_HS_LENGTH_REG 0x00002312 1499#define REG_A4XX_SP_HS_LENGTH_REG 0x00002312
1472 1500
1501#define REG_A4XX_SP_DS_PARAM_REG 0x0000231a
1502#define A4XX_SP_DS_PARAM_REG_POSREGID__MASK 0x000000ff
1503#define A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT 0
1504static inline uint32_t A4XX_SP_DS_PARAM_REG_POSREGID(uint32_t val)
1505{
1506 return ((val) << A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_DS_PARAM_REG_POSREGID__MASK;
1507}
1508#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000
1509#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20
1510static inline uint32_t A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
1511{
1512 return ((val) << A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK;
1513}
1514
1515static inline uint32_t REG_A4XX_SP_DS_OUT(uint32_t i0) { return 0x0000231b + 0x1*i0; }
1516
1517static inline uint32_t REG_A4XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000231b + 0x1*i0; }
1518#define A4XX_SP_DS_OUT_REG_A_REGID__MASK 0x000001ff
1519#define A4XX_SP_DS_OUT_REG_A_REGID__SHIFT 0
1520static inline uint32_t A4XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
1521{
1522 return ((val) << A4XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_A_REGID__MASK;
1523}
1524#define A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00001e00
1525#define A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 9
1526static inline uint32_t A4XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
1527{
1528 return ((val) << A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
1529}
1530#define A4XX_SP_DS_OUT_REG_B_REGID__MASK 0x01ff0000
1531#define A4XX_SP_DS_OUT_REG_B_REGID__SHIFT 16
1532static inline uint32_t A4XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
1533{
1534 return ((val) << A4XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_B_REGID__MASK;
1535}
1536#define A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x1e000000
1537#define A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 25
1538static inline uint32_t A4XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
1539{
1540 return ((val) << A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
1541}
1542
1543static inline uint32_t REG_A4XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000232c + 0x1*i0; }
1544
1545static inline uint32_t REG_A4XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000232c + 0x1*i0; }
1546#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
1547#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0
1548static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
1549{
1550 return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
1551}
1552#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
1553#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8
1554static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
1555{
1556 return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
1557}
1558#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
1559#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16
1560static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
1561{
1562 return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
1563}
1564#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
1565#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24
1566static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
1567{
1568 return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
1569}
1570
1473#define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334 1571#define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334
1474#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 1572#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1475#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 1573#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
@@ -1492,6 +1590,82 @@ static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1492 1590
1493#define REG_A4XX_SP_DS_LENGTH_REG 0x00002339 1591#define REG_A4XX_SP_DS_LENGTH_REG 0x00002339
1494 1592
1593#define REG_A4XX_SP_GS_PARAM_REG 0x00002341
1594#define A4XX_SP_GS_PARAM_REG_POSREGID__MASK 0x000000ff
1595#define A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT 0
1596static inline uint32_t A4XX_SP_GS_PARAM_REG_POSREGID(uint32_t val)
1597{
1598 return ((val) << A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_POSREGID__MASK;
1599}
1600#define A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK 0x0000ff00
1601#define A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT 8
1602static inline uint32_t A4XX_SP_GS_PARAM_REG_PRIMREGID(uint32_t val)
1603{
1604 return ((val) << A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK;
1605}
1606#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000
1607#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20
1608static inline uint32_t A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
1609{
1610 return ((val) << A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK;
1611}
1612
1613static inline uint32_t REG_A4XX_SP_GS_OUT(uint32_t i0) { return 0x00002342 + 0x1*i0; }
1614
1615static inline uint32_t REG_A4XX_SP_GS_OUT_REG(uint32_t i0) { return 0x00002342 + 0x1*i0; }
1616#define A4XX_SP_GS_OUT_REG_A_REGID__MASK 0x000001ff
1617#define A4XX_SP_GS_OUT_REG_A_REGID__SHIFT 0
1618static inline uint32_t A4XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
1619{
1620 return ((val) << A4XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_A_REGID__MASK;
1621}
1622#define A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00001e00
1623#define A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 9
1624static inline uint32_t A4XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
1625{
1626 return ((val) << A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
1627}
1628#define A4XX_SP_GS_OUT_REG_B_REGID__MASK 0x01ff0000
1629#define A4XX_SP_GS_OUT_REG_B_REGID__SHIFT 16
1630static inline uint32_t A4XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
1631{
1632 return ((val) << A4XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_B_REGID__MASK;
1633}
1634#define A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x1e000000
1635#define A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 25
1636static inline uint32_t A4XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
1637{
1638 return ((val) << A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
1639}
1640
1641static inline uint32_t REG_A4XX_SP_GS_VPC_DST(uint32_t i0) { return 0x00002353 + 0x1*i0; }
1642
1643static inline uint32_t REG_A4XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x00002353 + 0x1*i0; }
1644#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
1645#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0
1646static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
1647{
1648 return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
1649}
1650#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
1651#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8
1652static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
1653{
1654 return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
1655}
1656#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
1657#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16
1658static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
1659{
1660 return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
1661}
1662#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
1663#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24
1664static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
1665{
1666 return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
1667}
1668
1495#define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b 1669#define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b
1496#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 1670#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1497#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 1671#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
@@ -1693,6 +1867,18 @@ static inline uint32_t A4XX_VFD_CONTROL_3_REGID_VTXCNT(uint32_t val)
1693{ 1867{
1694 return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK; 1868 return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK;
1695} 1869}
1870#define A4XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
1871#define A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
1872static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
1873{
1874 return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSX__MASK;
1875}
1876#define A4XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
1877#define A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
1878static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
1879{
1880 return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSY__MASK;
1881}
1696 1882
1697#define REG_A4XX_VFD_CONTROL_4 0x00002204 1883#define REG_A4XX_VFD_CONTROL_4 0x00002204
1698 1884
@@ -2489,6 +2675,8 @@ static inline uint32_t A4XX_UNKNOWN_20F7(float val)
2489 2675
2490#define REG_A4XX_UNKNOWN_22D7 0x000022d7 2676#define REG_A4XX_UNKNOWN_22D7 0x000022d7
2491 2677
2678#define REG_A4XX_UNKNOWN_2352 0x00002352
2679
2492#define REG_A4XX_TEX_SAMP_0 0x00000000 2680#define REG_A4XX_TEX_SAMP_0 0x00000000
2493#define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001 2681#define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
2494#define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006 2682#define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
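
Every field in these generated headers follows one convention: a __MASK/__SHIFT pair plus a static inline packer of the shape ((val) << SHIFT) & MASK, with address-carrying fields pre-shifted by their alignment (the (val >> 2) and (val >> 12) variants above). A self-contained demo of the pattern, using invented field names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 32-bit register with a 9-bit ID at bit 0 and a 4-bit
 * component mask at bit 9, mirroring the A4XX __MASK/__SHIFT style. */
#define DEMO_REG_ID__MASK        0x000001ff
#define DEMO_REG_ID__SHIFT       0
static inline uint32_t DEMO_REG_ID(uint32_t val)
{
        return (val << DEMO_REG_ID__SHIFT) & DEMO_REG_ID__MASK;
}

#define DEMO_REG_COMPMASK__MASK  0x00001e00
#define DEMO_REG_COMPMASK__SHIFT 9
static inline uint32_t DEMO_REG_COMPMASK(uint32_t val)
{
        return (val << DEMO_REG_COMPMASK__SHIFT) & DEMO_REG_COMPMASK__MASK;
}

/* Unpacking reverses the two operations. */
static inline uint32_t demo_reg_get_id(uint32_t reg)
{
        return (reg & DEMO_REG_ID__MASK) >> DEMO_REG_ID__SHIFT;
}

int main(void)
{
        uint32_t reg = DEMO_REG_ID(0x42) | DEMO_REG_COMPMASK(0xf);

        printf("reg=0x%08x id=0x%x\n", reg, demo_reg_get_id(reg));
        return 0;
}

Note the mask truncates out-of-range values silently, which is why widening a field (as the COLOR_BUF_PITCH and CONTROL3_STRIDE mask changes above do) needs no change at the call sites.
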
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 9562a1fa552b..399a9e528139 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,15 +8,15 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06)
18 18
19Copyright (C) 2013-2014 by the following authors: 19Copyright (C) 2013-2015 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 20- Rob Clark <robdclark@gmail.com> (robclark)
21 21
22Permission is hereby granted, free of charge, to any person obtaining 22Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index bd5b23bf9041..41904fed1350 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,13 +8,13 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06)
18 18
19Copyright (C) 2013-2015 by the following authors: 19Copyright (C) 2013-2015 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 20- Rob Clark <robdclark@gmail.com> (robclark)
@@ -67,7 +67,7 @@ enum vgt_event_type {
67 67
68enum pc_di_primtype { 68enum pc_di_primtype {
69 DI_PT_NONE = 0, 69 DI_PT_NONE = 0,
70 DI_PT_POINTLIST_A2XX = 1, 70 DI_PT_POINTLIST_PSIZE = 1,
71 DI_PT_LINELIST = 2, 71 DI_PT_LINELIST = 2,
72 DI_PT_LINESTRIP = 3, 72 DI_PT_LINESTRIP = 3,
73 DI_PT_TRILIST = 4, 73 DI_PT_TRILIST = 4,
@@ -75,7 +75,7 @@ enum pc_di_primtype {
75 DI_PT_TRISTRIP = 6, 75 DI_PT_TRISTRIP = 6,
76 DI_PT_LINELOOP = 7, 76 DI_PT_LINELOOP = 7,
77 DI_PT_RECTLIST = 8, 77 DI_PT_RECTLIST = 8,
78 DI_PT_POINTLIST_A3XX = 9, 78 DI_PT_POINTLIST = 9,
79 DI_PT_LINE_ADJ = 10, 79 DI_PT_LINE_ADJ = 10,
80 DI_PT_LINESTRIP_ADJ = 11, 80 DI_PT_LINESTRIP_ADJ = 11,
81 DI_PT_TRI_ADJ = 12, 81 DI_PT_TRI_ADJ = 12,
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 1f2561e2ff71..6edcd6f57e70 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -15,10 +15,10 @@
15 15
16struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi) 16struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
17{ 17{
18 if (!msm_dsi || !msm_dsi->panel) 18 if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
19 return NULL; 19 return NULL;
20 20
21 return (msm_dsi->panel_flags & MIPI_DSI_MODE_VIDEO) ? 21 return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ?
22 msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] : 22 msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
23 msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID]; 23 msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
24} 24}
@@ -74,19 +74,15 @@ static void dsi_destroy(struct msm_dsi *msm_dsi)
74 74
75static struct msm_dsi *dsi_init(struct platform_device *pdev) 75static struct msm_dsi *dsi_init(struct platform_device *pdev)
76{ 76{
77 struct msm_dsi *msm_dsi = NULL; 77 struct msm_dsi *msm_dsi;
78 int ret; 78 int ret;
79 79
80 if (!pdev) { 80 if (!pdev)
81 ret = -ENXIO; 81 return ERR_PTR(-ENXIO);
82 goto fail;
83 }
84 82
85 msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL); 83 msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL);
86 if (!msm_dsi) { 84 if (!msm_dsi)
87 ret = -ENOMEM; 85 return ERR_PTR(-ENOMEM);
88 goto fail;
89 }
90 DBG("dsi probed=%p", msm_dsi); 86 DBG("dsi probed=%p", msm_dsi);
91 87
92 msm_dsi->pdev = pdev; 88 msm_dsi->pdev = pdev;
@@ -95,24 +91,22 @@ static struct msm_dsi *dsi_init(struct platform_device *pdev)
95 /* Init dsi host */ 91 /* Init dsi host */
96 ret = msm_dsi_host_init(msm_dsi); 92 ret = msm_dsi_host_init(msm_dsi);
97 if (ret) 93 if (ret)
98 goto fail; 94 goto destroy_dsi;
99 95
100 /* GET dsi PHY */ 96 /* GET dsi PHY */
101 ret = dsi_get_phy(msm_dsi); 97 ret = dsi_get_phy(msm_dsi);
102 if (ret) 98 if (ret)
103 goto fail; 99 goto destroy_dsi;
104 100
105 /* Register to dsi manager */ 101 /* Register to dsi manager */
106 ret = msm_dsi_manager_register(msm_dsi); 102 ret = msm_dsi_manager_register(msm_dsi);
107 if (ret) 103 if (ret)
108 goto fail; 104 goto destroy_dsi;
109 105
110 return msm_dsi; 106 return msm_dsi;
111 107
112fail: 108destroy_dsi:
113 if (msm_dsi) 109 dsi_destroy(msm_dsi);
114 dsi_destroy(msm_dsi);
115
116 return ERR_PTR(ret); 110 return ERR_PTR(ret);
117} 111}
118 112
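
The dsi_init() rework above is the standard kernel unwind shape: return directly while nothing has been allocated, and once the object exists, funnel every failure through a single label that tears it down, rather than a catch-all fail: guarded by if (msm_dsi). A compact user-space sketch of the resulting shape (ctx, init_a/init_b and ctx_destroy are stand-in names):

#include <stdio.h>
#include <stdlib.h>

struct ctx { int a, b; };

static int init_a(struct ctx *c) { c->a = 1; return 0; }
static int init_b(struct ctx *c) { c->b = 1; return 0; }
static void ctx_destroy(struct ctx *c) { free(c); }

/* Return early while there is nothing to unwind; one label afterwards. */
static struct ctx *ctx_init(void)
{
        struct ctx *c;

        c = calloc(1, sizeof(*c));
        if (!c)
                return NULL;        /* nothing allocated yet */

        if (init_a(c))
                goto destroy_ctx;

        if (init_b(c))
                goto destroy_ctx;

        return c;

destroy_ctx:
        ctx_destroy(c);
        return NULL;
}

int main(void)
{
        struct ctx *c = ctx_init();

        printf("%s\n", c ? "ok" : "failed");
        ctx_destroy(c);        /* free(NULL) is a no-op */
        return 0;
}
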
@@ -196,6 +190,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
196 struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]) 190 struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
197{ 191{
198 struct msm_drm_private *priv = dev->dev_private; 192 struct msm_drm_private *priv = dev->dev_private;
193 struct drm_bridge *ext_bridge;
199 int ret, i; 194 int ret, i;
200 195
201 if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] || 196 if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
@@ -223,10 +218,25 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
223 msm_dsi->encoders[i] = encoders[i]; 218 msm_dsi->encoders[i] = encoders[i];
224 } 219 }
225 220
226 msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id); 221 /*
222 * check if the dsi encoder output is connected to a panel or an
223 * external bridge. We create a connector only if we're connected to a
224 * drm_panel device. When we're connected to an external bridge, we
225 * assume that the drm_bridge driver will create the connector itself.
226 */
227 ext_bridge = msm_dsi_host_get_bridge(msm_dsi->host);
228
229 if (ext_bridge)
230 msm_dsi->connector =
231 msm_dsi_manager_ext_bridge_init(msm_dsi->id);
232 else
233 msm_dsi->connector =
234 msm_dsi_manager_connector_init(msm_dsi->id);
235
227 if (IS_ERR(msm_dsi->connector)) { 236 if (IS_ERR(msm_dsi->connector)) {
228 ret = PTR_ERR(msm_dsi->connector); 237 ret = PTR_ERR(msm_dsi->connector);
229 dev_err(dev->dev, "failed to create dsi connector: %d\n", ret); 238 dev_err(dev->dev,
239 "failed to create dsi connector: %d\n", ret);
230 msm_dsi->connector = NULL; 240 msm_dsi->connector = NULL;
231 goto fail; 241 goto fail;
232 } 242 }
@@ -242,10 +252,12 @@ fail:
242 msm_dsi_manager_bridge_destroy(msm_dsi->bridge); 252 msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
243 msm_dsi->bridge = NULL; 253 msm_dsi->bridge = NULL;
244 } 254 }
245 if (msm_dsi->connector) { 255
256 /* don't destroy connector if we didn't make it */
257 if (msm_dsi->connector && !msm_dsi->external_bridge)
246 msm_dsi->connector->funcs->destroy(msm_dsi->connector); 258 msm_dsi->connector->funcs->destroy(msm_dsi->connector);
247 msm_dsi->connector = NULL; 259
248 } 260 msm_dsi->connector = NULL;
249 } 261 }
250 262
251 return ret; 263 return ret;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 92d697de4858..5f5a3732cdf6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -27,21 +27,10 @@
27#define DSI_1 1 27#define DSI_1 1
28#define DSI_MAX 2 28#define DSI_MAX 2
29 29
30#define DSI_CLOCK_MASTER DSI_0
31#define DSI_CLOCK_SLAVE DSI_1
32
33#define DSI_LEFT DSI_0
34#define DSI_RIGHT DSI_1
35
36/* According to the current drm framework sequence, take the encoder of
37 * DSI_1 as master encoder
38 */
39#define DSI_ENCODER_MASTER DSI_1
40#define DSI_ENCODER_SLAVE DSI_0
41
42enum msm_dsi_phy_type { 30enum msm_dsi_phy_type {
43 MSM_DSI_PHY_28NM_HPM, 31 MSM_DSI_PHY_28NM_HPM,
44 MSM_DSI_PHY_28NM_LP, 32 MSM_DSI_PHY_28NM_LP,
33 MSM_DSI_PHY_20NM,
45 MSM_DSI_PHY_MAX 34 MSM_DSI_PHY_MAX
46}; 35};
47 36
@@ -65,13 +54,21 @@ struct msm_dsi {
65 struct drm_device *dev; 54 struct drm_device *dev;
66 struct platform_device *pdev; 55 struct platform_device *pdev;
67 56
57 /* connector managed by us when we're connected to a drm_panel */
68 struct drm_connector *connector; 58 struct drm_connector *connector;
59 /* internal dsi bridge attached to MDP interface */
69 struct drm_bridge *bridge; 60 struct drm_bridge *bridge;
70 61
71 struct mipi_dsi_host *host; 62 struct mipi_dsi_host *host;
72 struct msm_dsi_phy *phy; 63 struct msm_dsi_phy *phy;
64
65 /*
66 * panel/external_bridge connected to the dsi bridge output; only one
67 * of the two can be valid at a time
68 */
73 struct drm_panel *panel; 69 struct drm_panel *panel;
74 unsigned long panel_flags; 70 struct drm_bridge *external_bridge;
71 unsigned long device_flags;
75 72
76 struct device *phy_dev; 73 struct device *phy_dev;
77 bool phy_enabled; 74 bool phy_enabled;
@@ -86,6 +83,7 @@ struct msm_dsi {
86struct drm_bridge *msm_dsi_manager_bridge_init(u8 id); 83struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
87void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge); 84void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
88struct drm_connector *msm_dsi_manager_connector_init(u8 id); 85struct drm_connector *msm_dsi_manager_connector_init(u8 id);
86struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
89int msm_dsi_manager_phy_enable(int id, 87int msm_dsi_manager_phy_enable(int id,
90 const unsigned long bit_rate, const unsigned long esc_rate, 88 const unsigned long bit_rate, const unsigned long esc_rate,
91 u32 *clk_pre, u32 *clk_post); 89 u32 *clk_pre, u32 *clk_post);
@@ -96,6 +94,11 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
96void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); 94void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
97 95
98/* msm dsi */ 96/* msm dsi */
97static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
98{
99 return msm_dsi->panel || msm_dsi->external_bridge;
100}
101
99struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi); 102struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
100 103
101/* dsi pll */ 104/* dsi pll */
@@ -106,6 +109,8 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
106void msm_dsi_pll_destroy(struct msm_dsi_pll *pll); 109void msm_dsi_pll_destroy(struct msm_dsi_pll *pll);
107int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll, 110int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
108 struct clk **byte_clk_provider, struct clk **pixel_clk_provider); 111 struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
112void msm_dsi_pll_save_state(struct msm_dsi_pll *pll);
113int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll);
109#else 114#else
110static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, 115static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
111 enum msm_dsi_phy_type type, int id) { 116 enum msm_dsi_phy_type type, int id) {
@@ -119,6 +124,13 @@ static inline int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
119{ 124{
120 return -ENODEV; 125 return -ENODEV;
121} 126}
127static inline void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
128{
129}
130static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
131{
132 return 0;
133}
122#endif 134#endif
123 135
124/* dsi host */ 136/* dsi host */
@@ -140,6 +152,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
140 struct drm_display_mode *mode); 152 struct drm_display_mode *mode);
141struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host, 153struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
142 unsigned long *panel_flags); 154 unsigned long *panel_flags);
155struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
143int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer); 156int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
144void msm_dsi_host_unregister(struct mipi_dsi_host *host); 157void msm_dsi_host_unregister(struct mipi_dsi_host *host);
145int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, 158int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
@@ -153,9 +166,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi);
153struct msm_dsi_phy; 166struct msm_dsi_phy;
154void msm_dsi_phy_driver_register(void); 167void msm_dsi_phy_driver_register(void);
155void msm_dsi_phy_driver_unregister(void); 168void msm_dsi_phy_driver_unregister(void);
156int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel, 169int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
157 const unsigned long bit_rate, const unsigned long esc_rate); 170 const unsigned long bit_rate, const unsigned long esc_rate);
158int msm_dsi_phy_disable(struct msm_dsi_phy *phy); 171void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
159void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, 172void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
160 u32 *clk_pre, u32 *clk_post); 173 u32 *clk_pre, u32 *clk_post);
161struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy); 174struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
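Note: with the panel/external_bridge split above, callers can no longer test msm_dsi->panel alone to see whether a downstream device exists. A minimal sketch of gating an enable path on the new helper; the function name example_enable_output() is hypothetical, while msm_dsi_device_connected(), the panel field, and drm_panel_enable() come from this series or existing DRM code.

/* Sketch: proceed only when a panel or an external bridge is attached. */
static int example_enable_output(struct msm_dsi *msm_dsi)
{
	if (!msm_dsi_device_connected(msm_dsi))
		return -ENODEV;	/* nothing on the DSI output */

	/* drm_panel sinks are driven directly ... */
	if (msm_dsi->panel)
		return drm_panel_enable(msm_dsi->panel);

	/* ... external bridges are driven through the drm_bridge chain */
	return 0;
}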
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 9791ea04bcbc..1d2e32f0817b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23) 21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
22 22
23Copyright (C) 2013-2015 by the following authors: 23Copyright (C) 2013-2015 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 24- Rob Clark <robdclark@gmail.com> (robclark)
@@ -382,6 +382,11 @@ static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val)
382#define REG_DSI_TRIG_DMA 0x0000008c 382#define REG_DSI_TRIG_DMA 0x0000008c
383 383
384#define REG_DSI_DLN0_PHY_ERR 0x000000b0 384#define REG_DSI_DLN0_PHY_ERR 0x000000b0
385#define DSI_DLN0_PHY_ERR_DLN0_ERR_ESC 0x00000001
386#define DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC 0x00000010
387#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL 0x00000100
388#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 0x00001000
389#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1 0x00010000
385 390
386#define REG_DSI_TIMEOUT_STATUS 0x000000bc 391#define REG_DSI_TIMEOUT_STATUS 0x000000bc
387 392
@@ -435,6 +440,9 @@ static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
435#define REG_DSI_PHY_RESET 0x00000128 440#define REG_DSI_PHY_RESET 0x00000128
436#define DSI_PHY_RESET_RESET 0x00000001 441#define DSI_PHY_RESET_RESET 0x00000001
437 442
443#define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c
444#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001
445
438#define REG_DSI_RDBK_DATA_CTRL 0x000001d0 446#define REG_DSI_RDBK_DATA_CTRL 0x000001d0
439#define DSI_RDBK_DATA_CTRL_COUNT__MASK 0x00ff0000 447#define DSI_RDBK_DATA_CTRL_COUNT__MASK 0x00ff0000
440#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT 16 448#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT 16
@@ -830,6 +838,7 @@ static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
830#define REG_DSI_28nm_PHY_BIST_CTRL_5 0x000001c8 838#define REG_DSI_28nm_PHY_BIST_CTRL_5 0x000001c8
831 839
832#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL 0x000001d4 840#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL 0x000001d4
841#define DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001
833 842
834#define REG_DSI_28nm_PHY_LDO_CNTRL 0x000001dc 843#define REG_DSI_28nm_PHY_LDO_CNTRL 0x000001dc
835 844
@@ -994,5 +1003,185 @@ static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(uint32_t val)
994 1003
995#define REG_DSI_28nm_PHY_PLL_CTRL_54 0x000000d4 1004#define REG_DSI_28nm_PHY_PLL_CTRL_54 0x000000d4
996 1005
1006static inline uint32_t REG_DSI_20nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
1007
1008static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
1009
1010static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
1011
1012static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
1013
1014static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
1015
1016static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
1017
1018static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
1019
1020static inline uint32_t REG_DSI_20nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
1021
1022static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
1023
1024static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
1025
1026#define REG_DSI_20nm_PHY_LNCK_CFG_0 0x00000100
1027
1028#define REG_DSI_20nm_PHY_LNCK_CFG_1 0x00000104
1029
1030#define REG_DSI_20nm_PHY_LNCK_CFG_2 0x00000108
1031
1032#define REG_DSI_20nm_PHY_LNCK_CFG_3 0x0000010c
1033
1034#define REG_DSI_20nm_PHY_LNCK_CFG_4 0x00000110
1035
1036#define REG_DSI_20nm_PHY_LNCK_TEST_DATAPATH 0x00000114
1037
1038#define REG_DSI_20nm_PHY_LNCK_DEBUG_SEL 0x00000118
1039
1040#define REG_DSI_20nm_PHY_LNCK_TEST_STR0 0x0000011c
1041
1042#define REG_DSI_20nm_PHY_LNCK_TEST_STR1 0x00000120
1043
1044#define REG_DSI_20nm_PHY_TIMING_CTRL_0 0x00000140
1045#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
1046#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
1047static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
1048{
1049 return ((val) << DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
1050}
1051
1052#define REG_DSI_20nm_PHY_TIMING_CTRL_1 0x00000144
1053#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
1054#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
1055static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
1056{
1057 return ((val) << DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
1058}
1059
1060#define REG_DSI_20nm_PHY_TIMING_CTRL_2 0x00000148
1061#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
1062#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
1063static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
1064{
1065 return ((val) << DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
1066}
1067
1068#define REG_DSI_20nm_PHY_TIMING_CTRL_3 0x0000014c
1069#define DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001
1070
1071#define REG_DSI_20nm_PHY_TIMING_CTRL_4 0x00000150
1072#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
1073#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
1074static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
1075{
1076 return ((val) << DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
1077}
1078
1079#define REG_DSI_20nm_PHY_TIMING_CTRL_5 0x00000154
1080#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
1081#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
1082static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
1083{
1084 return ((val) << DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
1085}
1086
1087#define REG_DSI_20nm_PHY_TIMING_CTRL_6 0x00000158
1088#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
1089#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
1090static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
1091{
1092 return ((val) << DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
1093}
1094
1095#define REG_DSI_20nm_PHY_TIMING_CTRL_7 0x0000015c
1096#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
1097#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
1098static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
1099{
1100 return ((val) << DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
1101}
1102
1103#define REG_DSI_20nm_PHY_TIMING_CTRL_8 0x00000160
1104#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
1105#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
1106static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
1107{
1108 return ((val) << DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
1109}
1110
1111#define REG_DSI_20nm_PHY_TIMING_CTRL_9 0x00000164
1112#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
1113#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
1114static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
1115{
1116 return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
1117}
1118#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
1119#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
1120static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
1121{
1122 return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
1123}
1124
1125#define REG_DSI_20nm_PHY_TIMING_CTRL_10 0x00000168
1126#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
1127#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
1128static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
1129{
1130 return ((val) << DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
1131}
1132
1133#define REG_DSI_20nm_PHY_TIMING_CTRL_11 0x0000016c
1134#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
1135#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
1136static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
1137{
1138 return ((val) << DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
1139}
1140
1141#define REG_DSI_20nm_PHY_CTRL_0 0x00000170
1142
1143#define REG_DSI_20nm_PHY_CTRL_1 0x00000174
1144
1145#define REG_DSI_20nm_PHY_CTRL_2 0x00000178
1146
1147#define REG_DSI_20nm_PHY_CTRL_3 0x0000017c
1148
1149#define REG_DSI_20nm_PHY_CTRL_4 0x00000180
1150
1151#define REG_DSI_20nm_PHY_STRENGTH_0 0x00000184
1152
1153#define REG_DSI_20nm_PHY_STRENGTH_1 0x00000188
1154
1155#define REG_DSI_20nm_PHY_BIST_CTRL_0 0x000001b4
1156
1157#define REG_DSI_20nm_PHY_BIST_CTRL_1 0x000001b8
1158
1159#define REG_DSI_20nm_PHY_BIST_CTRL_2 0x000001bc
1160
1161#define REG_DSI_20nm_PHY_BIST_CTRL_3 0x000001c0
1162
1163#define REG_DSI_20nm_PHY_BIST_CTRL_4 0x000001c4
1164
1165#define REG_DSI_20nm_PHY_BIST_CTRL_5 0x000001c8
1166
1167#define REG_DSI_20nm_PHY_GLBL_TEST_CTRL 0x000001d4
1168#define DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001
1169
1170#define REG_DSI_20nm_PHY_LDO_CNTRL 0x000001dc
1171
1172#define REG_DSI_20nm_PHY_REGULATOR_CTRL_0 0x00000000
1173
1174#define REG_DSI_20nm_PHY_REGULATOR_CTRL_1 0x00000004
1175
1176#define REG_DSI_20nm_PHY_REGULATOR_CTRL_2 0x00000008
1177
1178#define REG_DSI_20nm_PHY_REGULATOR_CTRL_3 0x0000000c
1179
1180#define REG_DSI_20nm_PHY_REGULATOR_CTRL_4 0x00000010
1181
1182#define REG_DSI_20nm_PHY_REGULATOR_CTRL_5 0x00000014
1183
1184#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
1185
997 1186
998#endif /* DSI_XML */ 1187#endif /* DSI_XML */
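The generated 20nm PHY helpers above follow the usual rnndb pattern: a __MASK/__SHIFT pair plus an inline packer that shifts a field into place and masks stray bits. A sketch of programming one timing register with them; the function and its phy_base parameter are illustrative, and writel() is the standard MMIO accessor.

/* Pack a computed clk_zero value into REG_DSI_20nm_PHY_TIMING_CTRL_0. */
static void example_program_clk_zero(void __iomem *phy_base, u8 clk_zero)
{
	u32 val = DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(clk_zero);

	writel(val, phy_base + REG_DSI_20nm_PHY_TIMING_CTRL_0);
}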
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
new file mode 100644
index 000000000000..5872d5e5934f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -0,0 +1,92 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "dsi_cfg.h"
15
16/* DSI v2 is not supported yet */
17static const struct msm_dsi_config dsi_v2_cfg = {
18 .io_offset = 0,
19};
20
21static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
22 .io_offset = DSI_6G_REG_SHIFT,
23 .reg_cfg = {
24 .num = 4,
25 .regs = {
26 {"gdsc", -1, -1, -1, -1},
27 {"vdd", 3000000, 3000000, 150000, 100},
28 {"vdda", 1200000, 1200000, 100000, 100},
29 {"vddio", 1800000, 1800000, 100000, 100},
30 },
31 },
32};
33
34static const struct msm_dsi_config msm8916_dsi_cfg = {
35 .io_offset = DSI_6G_REG_SHIFT,
36 .reg_cfg = {
37 .num = 4,
38 .regs = {
39 {"gdsc", -1, -1, -1, -1},
40 {"vdd", 2850000, 2850000, 100000, 100},
41 {"vdda", 1200000, 1200000, 100000, 100},
42 {"vddio", 1800000, 1800000, 100000, 100},
43 },
44 },
45};
46
47static const struct msm_dsi_config msm8994_dsi_cfg = {
48 .io_offset = DSI_6G_REG_SHIFT,
49 .reg_cfg = {
50 .num = 7,
51 .regs = {
52 {"gdsc", -1, -1, -1, -1},
53 {"vdda", 1250000, 1250000, 100000, 100},
54 {"vddio", 1800000, 1800000, 100000, 100},
55 {"vcca", 1000000, 1000000, 10000, 100},
56 {"vdd", 1800000, 1800000, 100000, 100},
57 {"lab_reg", -1, -1, -1, -1},
58 {"ibb_reg", -1, -1, -1, -1},
59 },
60 }
61};
62
63static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
64 {MSM_DSI_VER_MAJOR_V2, U32_MAX, &dsi_v2_cfg},
65 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
66 &msm8974_apq8084_dsi_cfg},
67 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
68 &msm8974_apq8084_dsi_cfg},
69 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
70 &msm8974_apq8084_dsi_cfg},
71 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2,
72 &msm8974_apq8084_dsi_cfg},
73 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
74 {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
75};
76
77const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
78{
79 const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
80 int i;
81
82 for (i = ARRAY_SIZE(dsi_cfg_handlers) - 1; i >= 0; i--) {
83 if ((dsi_cfg_handlers[i].major == major) &&
84 (dsi_cfg_handlers[i].minor == minor)) {
85 cfg_hnd = &dsi_cfg_handlers[i];
86 break;
87 }
88 }
89
90 return cfg_hnd;
91}
92
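A usage sketch for the lookup above, with major/minor as returned by dsi_get_version() in dsi_host.c; example_pick_config() is illustrative, not part of the patch.

static int example_pick_config(u32 major, u32 minor)
{
	const struct msm_dsi_cfg_handler *cfg_hnd;

	cfg_hnd = msm_dsi_cfg_get(major, minor);
	if (!cfg_hnd)
		return -EINVAL;	/* unsupported hardware revision */

	/* e.g. (MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1)
	 * resolves to &msm8916_dsi_cfg per the table above
	 */
	return 0;
}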
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
new file mode 100644
index 000000000000..4cf887240177
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -0,0 +1,44 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef __MSM_DSI_CFG_H__
15#define __MSM_DSI_CFG_H__
16
17#include "dsi.h"
18
19#define MSM_DSI_VER_MAJOR_V2 0x02
20#define MSM_DSI_VER_MAJOR_6G 0x03
21#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
22#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
23#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
24#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
25#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
26#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
27
28#define DSI_6G_REG_SHIFT 4
29
30struct msm_dsi_config {
31 u32 io_offset;
32 struct dsi_reg_config reg_cfg;
33};
34
35struct msm_dsi_cfg_handler {
36 u32 major;
37 u32 minor;
38 const struct msm_dsi_config *cfg;
39};
40
41const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);
42
43#endif /* __MSM_DSI_CFG_H__ */
44
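The 6G minor numbers place each version component in a fixed byte lane (0x10030001 for v1.3.1, 0x10020000 for v1.2, and so on), so plain numeric comparisons order revisions correctly; that is what the minor >= MSM_DSI_6G_VER_MINOR_V1_2 style checks in dsi_host.c rely on. A one-line sketch (the helper name is hypothetical):

static bool example_is_at_least_v1_2(u32 minor)
{
	/* fixed byte lanes make >= a valid version comparison */
	return minor >= MSM_DSI_6G_VER_MINOR_V1_2;
}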
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index de0400923303..8d82973fe9db 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -20,103 +20,15 @@
20#include <linux/of_device.h> 20#include <linux/of_device.h>
21#include <linux/of_gpio.h> 21#include <linux/of_gpio.h>
22#include <linux/of_irq.h> 22#include <linux/of_irq.h>
23#include <linux/pinctrl/consumer.h>
24#include <linux/of_graph.h>
23#include <linux/regulator/consumer.h> 25#include <linux/regulator/consumer.h>
24#include <linux/spinlock.h> 26#include <linux/spinlock.h>
25#include <video/mipi_display.h> 27#include <video/mipi_display.h>
26 28
27#include "dsi.h" 29#include "dsi.h"
28#include "dsi.xml.h" 30#include "dsi.xml.h"
29 31#include "dsi_cfg.h"
30#define MSM_DSI_VER_MAJOR_V2 0x02
31#define MSM_DSI_VER_MAJOR_6G 0x03
32#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
33#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
34#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
35#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
36#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
37
38#define DSI_6G_REG_SHIFT 4
39
40struct dsi_config {
41 u32 major;
42 u32 minor;
43 u32 io_offset;
44 struct dsi_reg_config reg_cfg;
45};
46
47static const struct dsi_config dsi_cfgs[] = {
48 {MSM_DSI_VER_MAJOR_V2, 0, 0, {0,} },
49 { /* 8974 v1 */
50 .major = MSM_DSI_VER_MAJOR_6G,
51 .minor = MSM_DSI_6G_VER_MINOR_V1_0,
52 .io_offset = DSI_6G_REG_SHIFT,
53 .reg_cfg = {
54 .num = 4,
55 .regs = {
56 {"gdsc", -1, -1, -1, -1},
57 {"vdd", 3000000, 3000000, 150000, 100},
58 {"vdda", 1200000, 1200000, 100000, 100},
59 {"vddio", 1800000, 1800000, 100000, 100},
60 },
61 },
62 },
63 { /* 8974 v2 */
64 .major = MSM_DSI_VER_MAJOR_6G,
65 .minor = MSM_DSI_6G_VER_MINOR_V1_1,
66 .io_offset = DSI_6G_REG_SHIFT,
67 .reg_cfg = {
68 .num = 4,
69 .regs = {
70 {"gdsc", -1, -1, -1, -1},
71 {"vdd", 3000000, 3000000, 150000, 100},
72 {"vdda", 1200000, 1200000, 100000, 100},
73 {"vddio", 1800000, 1800000, 100000, 100},
74 },
75 },
76 },
77 { /* 8974 v3 */
78 .major = MSM_DSI_VER_MAJOR_6G,
79 .minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
80 .io_offset = DSI_6G_REG_SHIFT,
81 .reg_cfg = {
82 .num = 4,
83 .regs = {
84 {"gdsc", -1, -1, -1, -1},
85 {"vdd", 3000000, 3000000, 150000, 100},
86 {"vdda", 1200000, 1200000, 100000, 100},
87 {"vddio", 1800000, 1800000, 100000, 100},
88 },
89 },
90 },
91 { /* 8084 */
92 .major = MSM_DSI_VER_MAJOR_6G,
93 .minor = MSM_DSI_6G_VER_MINOR_V1_2,
94 .io_offset = DSI_6G_REG_SHIFT,
95 .reg_cfg = {
96 .num = 4,
97 .regs = {
98 {"gdsc", -1, -1, -1, -1},
99 {"vdd", 3000000, 3000000, 150000, 100},
100 {"vdda", 1200000, 1200000, 100000, 100},
101 {"vddio", 1800000, 1800000, 100000, 100},
102 },
103 },
104 },
105 { /* 8916 */
106 .major = MSM_DSI_VER_MAJOR_6G,
107 .minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
108 .io_offset = DSI_6G_REG_SHIFT,
109 .reg_cfg = {
110 .num = 4,
111 .regs = {
112 {"gdsc", -1, -1, -1, -1},
113 {"vdd", 2850000, 2850000, 100000, 100},
114 {"vdda", 1200000, 1200000, 100000, 100},
115 {"vddio", 1800000, 1800000, 100000, 100},
116 },
117 },
118 },
119};
120 32
121static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor) 33static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
122{ 34{
@@ -194,7 +106,7 @@ struct msm_dsi_host {
194 struct gpio_desc *disp_en_gpio; 106 struct gpio_desc *disp_en_gpio;
195 struct gpio_desc *te_gpio; 107 struct gpio_desc *te_gpio;
196 108
197 const struct dsi_config *cfg; 109 const struct msm_dsi_cfg_handler *cfg_hnd;
198 110
199 struct completion dma_comp; 111 struct completion dma_comp;
200 struct completion video_comp; 112 struct completion video_comp;
@@ -212,8 +124,8 @@ struct msm_dsi_host {
212 124
213 struct drm_display_mode *mode; 125 struct drm_display_mode *mode;
214 126
215 /* Panel info */ 127 /* connected device info */
216 struct device_node *panel_node; 128 struct device_node *device_node;
217 unsigned int channel; 129 unsigned int channel;
218 unsigned int lanes; 130 unsigned int lanes;
219 enum mipi_dsi_pixel_format format; 131 enum mipi_dsi_pixel_format format;
@@ -239,61 +151,58 @@ static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
239 151
240static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg) 152static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
241{ 153{
242 return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg); 154 return msm_readl(msm_host->ctrl_base + reg);
243} 155}
244static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data) 156static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
245{ 157{
246 msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg); 158 msm_writel(data, msm_host->ctrl_base + reg);
247} 159}
248 160
249static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host); 161static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
250static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host); 162static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
251 163
252static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host) 164static const struct msm_dsi_cfg_handler *dsi_get_config(
165 struct msm_dsi_host *msm_host)
253{ 166{
254 const struct dsi_config *cfg; 167 const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
255 struct regulator *gdsc_reg; 168 struct regulator *gdsc_reg;
256 int i, ret; 169 int ret;
257 u32 major = 0, minor = 0; 170 u32 major = 0, minor = 0;
258 171
259 gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc"); 172 gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
260 if (IS_ERR(gdsc_reg)) { 173 if (IS_ERR(gdsc_reg)) {
261 pr_err("%s: cannot get gdsc\n", __func__); 174 pr_err("%s: cannot get gdsc\n", __func__);
262 goto fail; 175 goto exit;
263 } 176 }
264 ret = regulator_enable(gdsc_reg); 177 ret = regulator_enable(gdsc_reg);
265 if (ret) { 178 if (ret) {
266 pr_err("%s: unable to enable gdsc\n", __func__); 179 pr_err("%s: unable to enable gdsc\n", __func__);
267 regulator_put(gdsc_reg); 180 goto put_gdsc;
268 goto fail;
269 } 181 }
270 ret = clk_prepare_enable(msm_host->ahb_clk); 182 ret = clk_prepare_enable(msm_host->ahb_clk);
271 if (ret) { 183 if (ret) {
272 pr_err("%s: unable to enable ahb_clk\n", __func__); 184 pr_err("%s: unable to enable ahb_clk\n", __func__);
273 regulator_disable(gdsc_reg); 185 goto disable_gdsc;
274 regulator_put(gdsc_reg);
275 goto fail;
276 } 186 }
277 187
278 ret = dsi_get_version(msm_host->ctrl_base, &major, &minor); 188 ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
279
280 clk_disable_unprepare(msm_host->ahb_clk);
281 regulator_disable(gdsc_reg);
282 regulator_put(gdsc_reg);
283 if (ret) { 189 if (ret) {
284 pr_err("%s: Invalid version\n", __func__); 190 pr_err("%s: Invalid version\n", __func__);
285 goto fail; 191 goto disable_clks;
286 } 192 }
287 193
288 for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) { 194 cfg_hnd = msm_dsi_cfg_get(major, minor);
289 cfg = dsi_cfgs + i;
290 if ((cfg->major == major) && (cfg->minor == minor))
291 return cfg;
292 }
293 pr_err("%s: Version %x:%x not support\n", __func__, major, minor);
294 195
295fail: 196 DBG("%s: Version %x:%x\n", __func__, major, minor);
296 return NULL; 197
198disable_clks:
199 clk_disable_unprepare(msm_host->ahb_clk);
200disable_gdsc:
201 regulator_disable(gdsc_reg);
202put_gdsc:
203 regulator_put(gdsc_reg);
204exit:
205 return cfg_hnd;
297} 206}
298 207
299static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host) 208static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
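The reworked dsi_get_config() above replaces the old version's duplicated cleanup calls with the standard kernel goto ladder: each label undoes exactly the step acquired before the jump, in reverse order, and the success path falls through the same labels because the resources were only needed while reading the hardware version. A stripped-down sketch of the same shape; the register address and function name are placeholders.

static int example_read_version(struct device *dev, u32 *ver)
{
	struct regulator *reg;
	void __iomem *base;
	int ret;

	reg = regulator_get(dev, "gdsc");
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	ret = regulator_enable(reg);
	if (ret)
		goto put_reg;

	base = ioremap(0x04700000, 0x1000);	/* placeholder address */
	if (!base) {
		ret = -ENOMEM;
		goto disable_reg;
	}

	*ver = readl(base);	/* stand-in for the real version read */
	iounmap(base);
disable_reg:
	regulator_disable(reg);
put_reg:
	regulator_put(reg);
	return ret;
}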
@@ -304,8 +213,8 @@ static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
304static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host) 213static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
305{ 214{
306 struct regulator_bulk_data *s = msm_host->supplies; 215 struct regulator_bulk_data *s = msm_host->supplies;
307 const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs; 216 const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
308 int num = msm_host->cfg->reg_cfg.num; 217 int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
309 int i; 218 int i;
310 219
311 DBG(""); 220 DBG("");
@@ -320,8 +229,8 @@ static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
320static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host) 229static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
321{ 230{
322 struct regulator_bulk_data *s = msm_host->supplies; 231 struct regulator_bulk_data *s = msm_host->supplies;
323 const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs; 232 const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
324 int num = msm_host->cfg->reg_cfg.num; 233 int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
325 int ret, i; 234 int ret, i;
326 235
327 DBG(""); 236 DBG("");
@@ -354,8 +263,8 @@ fail:
354static int dsi_regulator_init(struct msm_dsi_host *msm_host) 263static int dsi_regulator_init(struct msm_dsi_host *msm_host)
355{ 264{
356 struct regulator_bulk_data *s = msm_host->supplies; 265 struct regulator_bulk_data *s = msm_host->supplies;
357 const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs; 266 const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
358 int num = msm_host->cfg->reg_cfg.num; 267 int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
359 int i, ret; 268 int i, ret;
360 269
361 for (i = 0; i < num; i++) 270 for (i = 0; i < num; i++)
@@ -697,6 +606,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
697{ 606{
698 u32 flags = msm_host->mode_flags; 607 u32 flags = msm_host->mode_flags;
699 enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; 608 enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
609 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
700 u32 data = 0; 610 u32 data = 0;
701 611
702 if (!enable) { 612 if (!enable) {
@@ -750,8 +660,8 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
750 data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE); 660 data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
751 data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW); 661 data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
752 data |= DSI_TRIG_CTRL_STREAM(msm_host->channel); 662 data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
753 if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) && 663 if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
754 (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2)) 664 (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
755 data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME; 665 data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
756 dsi_write(msm_host, REG_DSI_TRIG_CTRL, data); 666 dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
757 667
@@ -1257,7 +1167,11 @@ static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
1257 1167
1258 status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR); 1168 status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
1259 1169
1260 if (status) { 1170 if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
1171 DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
1172 DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
1173 DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
1174 DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
1261 dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status); 1175 dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
1262 msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY; 1176 msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
1263 } 1177 }
@@ -1359,7 +1273,8 @@ static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
1359 return PTR_ERR(msm_host->disp_en_gpio); 1273 return PTR_ERR(msm_host->disp_en_gpio);
1360 } 1274 }
1361 1275
1362 msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te", GPIOD_IN); 1276 msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
1277 GPIOD_IN);
1363 if (IS_ERR(msm_host->te_gpio)) { 1278 if (IS_ERR(msm_host->te_gpio)) {
1364 DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio)); 1279 DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
1365 return PTR_ERR(msm_host->te_gpio); 1280 return PTR_ERR(msm_host->te_gpio);
@@ -1379,7 +1294,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
1379 msm_host->format = dsi->format; 1294 msm_host->format = dsi->format;
1380 msm_host->mode_flags = dsi->mode_flags; 1295 msm_host->mode_flags = dsi->mode_flags;
1381 1296
1382 msm_host->panel_node = dsi->dev.of_node; 1297 WARN_ON(dsi->dev.of_node != msm_host->device_node);
1383 1298
1384 /* Some gpios defined in panel DT need to be controlled by host */ 1299 /* Some gpios defined in panel DT need to be controlled by host */
1385 ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev); 1300 ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
@@ -1398,7 +1313,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
1398{ 1313{
1399 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1314 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1400 1315
1401 msm_host->panel_node = NULL; 1316 msm_host->device_node = NULL;
1402 1317
1403 DBG("id=%d", msm_host->id); 1318 DBG("id=%d", msm_host->id);
1404 if (msm_host->dev) 1319 if (msm_host->dev)
@@ -1429,6 +1344,48 @@ static struct mipi_dsi_host_ops dsi_host_ops = {
1429 .transfer = dsi_host_transfer, 1344 .transfer = dsi_host_transfer,
1430}; 1345};
1431 1346
1347static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
1348{
1349 struct device *dev = &msm_host->pdev->dev;
1350 struct device_node *np = dev->of_node;
1351 struct device_node *endpoint, *device_node;
1352 int ret;
1353
1354 ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
1355 if (ret) {
1356 dev_err(dev, "%s: host index not specified, ret=%d\n",
1357 __func__, ret);
1358 return ret;
1359 }
1360
1361 /*
1362 * Get the first endpoint node. In our case, dsi has one output port
1363 * to which the panel is connected. Don't return an error if a port
1364 * isn't defined. It's possible that there is nothing connected to
1365 * the dsi output.
1366 */
1367 endpoint = of_graph_get_next_endpoint(np, NULL);
1368 if (!endpoint) {
1369 dev_dbg(dev, "%s: no endpoint\n", __func__);
1370 return 0;
1371 }
1372
1373 /* Get panel node from the output port's endpoint data */
1374 device_node = of_graph_get_remote_port_parent(endpoint);
1375 if (!device_node) {
1376 dev_err(dev, "%s: no valid device\n", __func__);
1377 of_node_put(endpoint);
1378 return -ENODEV;
1379 }
1380
1381 of_node_put(endpoint);
1382 of_node_put(device_node);
1383
1384 msm_host->device_node = device_node;
1385
1386 return 0;
1387}
1388
1432int msm_dsi_host_init(struct msm_dsi *msm_dsi) 1389int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1433{ 1390{
1434 struct msm_dsi_host *msm_host = NULL; 1391 struct msm_dsi_host *msm_host = NULL;
@@ -1443,15 +1400,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1443 goto fail; 1400 goto fail;
1444 } 1401 }
1445 1402
1446 ret = of_property_read_u32(pdev->dev.of_node, 1403 msm_host->pdev = pdev;
1447 "qcom,dsi-host-index", &msm_host->id); 1404
1405 ret = dsi_host_parse_dt(msm_host);
1448 if (ret) { 1406 if (ret) {
1449 dev_err(&pdev->dev, 1407 pr_err("%s: failed to parse dt\n", __func__);
1450 "%s: host index not specified, ret=%d\n",
1451 __func__, ret);
1452 goto fail; 1408 goto fail;
1453 } 1409 }
1454 msm_host->pdev = pdev;
1455 1410
1456 ret = dsi_clk_init(msm_host); 1411 ret = dsi_clk_init(msm_host);
1457 if (ret) { 1412 if (ret) {
@@ -1466,13 +1421,16 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1466 goto fail; 1421 goto fail;
1467 } 1422 }
1468 1423
1469 msm_host->cfg = dsi_get_config(msm_host); 1424 msm_host->cfg_hnd = dsi_get_config(msm_host);
1470 if (!msm_host->cfg) { 1425 if (!msm_host->cfg_hnd) {
1471 ret = -EINVAL; 1426 ret = -EINVAL;
1472 pr_err("%s: get config failed\n", __func__); 1427 pr_err("%s: get config failed\n", __func__);
1473 goto fail; 1428 goto fail;
1474 } 1429 }
1475 1430
1431 /* fixup base address by io offset */
1432 msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
1433
1476 ret = dsi_regulator_init(msm_host); 1434 ret = dsi_regulator_init(msm_host);
1477 if (ret) { 1435 if (ret) {
1478 pr_err("%s: regulator init failed\n", __func__); 1436 pr_err("%s: regulator init failed\n", __func__);
@@ -1559,7 +1517,6 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
1559int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer) 1517int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
1560{ 1518{
1561 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1519 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1562 struct device_node *node;
1563 int ret; 1520 int ret;
1564 1521
1565 /* Register mipi dsi host */ 1522 /* Register mipi dsi host */
@@ -1577,14 +1534,13 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
1577 * It makes sure panel is connected when fbcon detects 1534 * It makes sure panel is connected when fbcon detects
1578 * connector status and gets the proper display mode to 1535 * connector status and gets the proper display mode to
1579 * create framebuffer. 1536 * create framebuffer.
1537 * Don't try to defer if there is nothing connected to the dsi
1538 * output
1580 */ 1539 */
1581 if (check_defer) { 1540 if (check_defer && msm_host->device_node) {
1582 node = of_get_child_by_name(msm_host->pdev->dev.of_node, 1541 if (!of_drm_find_panel(msm_host->device_node))
1583 "panel"); 1542 if (!of_drm_find_bridge(msm_host->device_node))
1584 if (node) {
1585 if (!of_drm_find_panel(node))
1586 return -EPROBE_DEFER; 1543 return -EPROBE_DEFER;
1587 }
1588 } 1544 }
1589 } 1545 }
1590 1546
@@ -1663,6 +1619,7 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
1663 const struct mipi_dsi_msg *msg) 1619 const struct mipi_dsi_msg *msg)
1664{ 1620{
1665 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1621 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1622 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1666 int data_byte, rx_byte, dlen, end; 1623 int data_byte, rx_byte, dlen, end;
1667 int short_response, diff, pkt_size, ret = 0; 1624 int short_response, diff, pkt_size, ret = 0;
1668 char cmd; 1625 char cmd;
@@ -1704,8 +1661,8 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
1704 return -EINVAL; 1661 return -EINVAL;
1705 } 1662 }
1706 1663
1707 if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) && 1664 if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
1708 (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) { 1665 (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
1709 /* Clear the RDBK_DATA registers */ 1666 /* Clear the RDBK_DATA registers */
1710 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 1667 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
1711 DSI_RDBK_DATA_CTRL_CLR); 1668 DSI_RDBK_DATA_CTRL_CLR);
@@ -1919,6 +1876,13 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
1919 goto fail_disable_reg; 1876 goto fail_disable_reg;
1920 } 1877 }
1921 1878
1879 ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
1880 if (ret) {
1881 pr_err("%s: failed to set pinctrl default state, %d\n",
1882 __func__, ret);
1883 goto fail_disable_clk;
1884 }
1885
1922 dsi_timing_setup(msm_host); 1886 dsi_timing_setup(msm_host);
1923 dsi_sw_reset(msm_host); 1887 dsi_sw_reset(msm_host);
1924 dsi_ctrl_config(msm_host, true, clk_pre, clk_post); 1888 dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
@@ -1931,6 +1895,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
1931 1895
1932 return 0; 1896 return 0;
1933 1897
1898fail_disable_clk:
1899 dsi_clk_ctrl(msm_host, 0);
1934fail_disable_reg: 1900fail_disable_reg:
1935 dsi_host_regulator_disable(msm_host); 1901 dsi_host_regulator_disable(msm_host);
1936unlock_ret: 1902unlock_ret:
@@ -1953,6 +1919,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
1953 if (msm_host->disp_en_gpio) 1919 if (msm_host->disp_en_gpio)
1954 gpiod_set_value(msm_host->disp_en_gpio, 0); 1920 gpiod_set_value(msm_host->disp_en_gpio, 0);
1955 1921
1922 pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
1923
1956 msm_dsi_manager_phy_disable(msm_host->id); 1924 msm_dsi_manager_phy_disable(msm_host->id);
1957 1925
1958 dsi_clk_ctrl(msm_host, 0); 1926 dsi_clk_ctrl(msm_host, 0);
@@ -1993,10 +1961,16 @@ struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
1993 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1961 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1994 struct drm_panel *panel; 1962 struct drm_panel *panel;
1995 1963
1996 panel = of_drm_find_panel(msm_host->panel_node); 1964 panel = of_drm_find_panel(msm_host->device_node);
1997 if (panel_flags) 1965 if (panel_flags)
1998 *panel_flags = msm_host->mode_flags; 1966 *panel_flags = msm_host->mode_flags;
1999 1967
2000 return panel; 1968 return panel;
2001} 1969}
2002 1970
1971struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
1972{
1973 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1974
1975 return of_drm_find_bridge(msm_host->device_node);
1976}
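With msm_dsi_host_get_panel() and the new msm_dsi_host_get_bridge() both resolving through device_node, a manager-side caller can pick the connector path based on which kind of sink probes. A sketch of that decision, assuming it is made per DSI link; example_setup_sink() is illustrative and not the actual call flow of this patch.

static int example_setup_sink(struct msm_dsi *msm_dsi, u8 id)
{
	struct drm_connector *connector;

	msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
						&msm_dsi->device_flags);
	if (msm_dsi->panel)	/* drm_panel: driver owns the connector */
		connector = msm_dsi_manager_connector_init(id);
	else			/* otherwise expect an external bridge */
		connector = msm_dsi_manager_ext_bridge_init(id);

	return PTR_ERR_OR_ZERO(connector);
}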
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 87ac6612b6f8..0455ff75074a 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -14,19 +14,31 @@
14#include "msm_kms.h" 14#include "msm_kms.h"
15#include "dsi.h" 15#include "dsi.h"
16 16
17#define DSI_CLOCK_MASTER DSI_0
18#define DSI_CLOCK_SLAVE DSI_1
19
20#define DSI_LEFT DSI_0
21#define DSI_RIGHT DSI_1
22
23/* According to the current drm framework sequence, take the encoder of
24 * DSI_1 as the master encoder
25 */
26#define DSI_ENCODER_MASTER DSI_1
27#define DSI_ENCODER_SLAVE DSI_0
28
17struct msm_dsi_manager { 29struct msm_dsi_manager {
18 struct msm_dsi *dsi[DSI_MAX]; 30 struct msm_dsi *dsi[DSI_MAX];
19 31
20 bool is_dual_panel; 32 bool is_dual_dsi;
21 bool is_sync_needed; 33 bool is_sync_needed;
22 int master_panel_id; 34 int master_dsi_link_id;
23}; 35};
24 36
25static struct msm_dsi_manager msm_dsim_glb; 37static struct msm_dsi_manager msm_dsim_glb;
26 38
27#define IS_DUAL_PANEL() (msm_dsim_glb.is_dual_panel) 39#define IS_DUAL_DSI() (msm_dsim_glb.is_dual_dsi)
28#define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed) 40#define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed)
29#define IS_MASTER_PANEL(id) (msm_dsim_glb.master_panel_id == id) 41#define IS_MASTER_DSI_LINK(id) (msm_dsim_glb.master_dsi_link_id == id)
30 42
31static inline struct msm_dsi *dsi_mgr_get_dsi(int id) 43static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
32{ 44{
@@ -38,23 +50,23 @@ static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id)
38 return msm_dsim_glb.dsi[(id + 1) % DSI_MAX]; 50 return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
39} 51}
40 52
41static int dsi_mgr_parse_dual_panel(struct device_node *np, int id) 53static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id)
42{ 54{
43 struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; 55 struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
44 56
45 /* We assume 2 dsi nodes have the same information of dual-panel and 57 /* We assume 2 dsi nodes have the same information of dual-dsi and
46 * sync-mode, and only one node specifies master in case of dual mode. 58 * sync-mode, and only one node specifies master in case of dual mode.
47 */ 59 */
48 if (!msm_dsim->is_dual_panel) 60 if (!msm_dsim->is_dual_dsi)
49 msm_dsim->is_dual_panel = of_property_read_bool( 61 msm_dsim->is_dual_dsi = of_property_read_bool(
50 np, "qcom,dual-panel-mode"); 62 np, "qcom,dual-dsi-mode");
51 63
52 if (msm_dsim->is_dual_panel) { 64 if (msm_dsim->is_dual_dsi) {
53 if (of_property_read_bool(np, "qcom,master-panel")) 65 if (of_property_read_bool(np, "qcom,master-dsi"))
54 msm_dsim->master_panel_id = id; 66 msm_dsim->master_dsi_link_id = id;
55 if (!msm_dsim->is_sync_needed) 67 if (!msm_dsim->is_sync_needed)
56 msm_dsim->is_sync_needed = of_property_read_bool( 68 msm_dsim->is_sync_needed = of_property_read_bool(
57 np, "qcom,sync-dual-panel"); 69 np, "qcom,sync-dual-dsi");
58 } 70 }
59 71
60 return 0; 72 return 0;
@@ -68,7 +80,7 @@ static int dsi_mgr_host_register(int id)
68 struct msm_dsi_pll *src_pll; 80 struct msm_dsi_pll *src_pll;
69 int ret; 81 int ret;
70 82
71 if (!IS_DUAL_PANEL()) { 83 if (!IS_DUAL_DSI()) {
72 ret = msm_dsi_host_register(msm_dsi->host, true); 84 ret = msm_dsi_host_register(msm_dsi->host, true);
73 if (ret) 85 if (ret)
74 return ret; 86 return ret;
@@ -78,9 +90,9 @@ static int dsi_mgr_host_register(int id)
78 } else if (!other_dsi) { 90 } else if (!other_dsi) {
79 ret = 0; 91 ret = 0;
80 } else { 92 } else {
81 struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ? 93 struct msm_dsi *mdsi = IS_MASTER_DSI_LINK(id) ?
82 msm_dsi : other_dsi; 94 msm_dsi : other_dsi;
83 struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ? 95 struct msm_dsi *sdsi = IS_MASTER_DSI_LINK(id) ?
84 other_dsi : msm_dsi; 96 other_dsi : msm_dsi;
85 /* Register slave host first, so that slave DSI device 97 /* Register slave host first, so that slave DSI device
86 * has a chance to probe, and do not block the master 98 * has a chance to probe, and do not block the master
@@ -144,28 +156,28 @@ static enum drm_connector_status dsi_mgr_connector_detect(
144 DBG("id=%d", id); 156 DBG("id=%d", id);
145 if (!msm_dsi->panel) { 157 if (!msm_dsi->panel) {
146 msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host, 158 msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
147 &msm_dsi->panel_flags); 159 &msm_dsi->device_flags);
148 160
149 /* There is only 1 panel in the global panel list 161 /* There is only 1 panel in the global panel list
150 * for dual panel mode. Therefore slave dsi should get 162 * for dual DSI mode. Therefore slave dsi should get
151 * the drm_panel instance from master dsi, and 163 * the drm_panel instance from master dsi, and
152 * keep using the panel flags got from the current DSI link. 164 * keep using the panel flags got from the current DSI link.
153 */ 165 */
154 if (!msm_dsi->panel && IS_DUAL_PANEL() && 166 if (!msm_dsi->panel && IS_DUAL_DSI() &&
155 !IS_MASTER_PANEL(id) && other_dsi) 167 !IS_MASTER_DSI_LINK(id) && other_dsi)
156 msm_dsi->panel = msm_dsi_host_get_panel( 168 msm_dsi->panel = msm_dsi_host_get_panel(
157 other_dsi->host, NULL); 169 other_dsi->host, NULL);
158 170
159 if (msm_dsi->panel && IS_DUAL_PANEL()) 171 if (msm_dsi->panel && IS_DUAL_DSI())
160 drm_object_attach_property(&connector->base, 172 drm_object_attach_property(&connector->base,
161 connector->dev->mode_config.tile_property, 0); 173 connector->dev->mode_config.tile_property, 0);
162 174
163 /* Set split display info to kms once dual panel is connected 175 /* Set split display info to kms once dual DSI panel is
164 * to both hosts 176 * connected to both hosts.
165 */ 177 */
166 if (msm_dsi->panel && IS_DUAL_PANEL() && 178 if (msm_dsi->panel && IS_DUAL_DSI() &&
167 other_dsi && other_dsi->panel) { 179 other_dsi && other_dsi->panel) {
168 bool cmd_mode = !(msm_dsi->panel_flags & 180 bool cmd_mode = !(msm_dsi->device_flags &
169 MIPI_DSI_MODE_VIDEO); 181 MIPI_DSI_MODE_VIDEO);
170 struct drm_encoder *encoder = msm_dsi_get_encoder( 182 struct drm_encoder *encoder = msm_dsi_get_encoder(
171 dsi_mgr_get_dsi(DSI_ENCODER_MASTER)); 183 dsi_mgr_get_dsi(DSI_ENCODER_MASTER));
@@ -176,7 +188,7 @@ static enum drm_connector_status dsi_mgr_connector_detect(
176 kms->funcs->set_split_display(kms, encoder, 188 kms->funcs->set_split_display(kms, encoder,
177 slave_enc, cmd_mode); 189 slave_enc, cmd_mode);
178 else 190 else
179 pr_err("mdp does not support dual panel\n"); 191 pr_err("mdp does not support dual DSI\n");
180 } 192 }
181 } 193 }
182 194
@@ -273,7 +285,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
273 if (!num) 285 if (!num)
274 return 0; 286 return 0;
275 287
276 if (IS_DUAL_PANEL()) { 288 if (IS_DUAL_DSI()) {
277 /* report half resolution to user */ 289 /* report half resolution to user */
278 dsi_dual_connector_fix_modes(connector); 290 dsi_dual_connector_fix_modes(connector);
279 ret = dsi_dual_connector_tile_init(connector, id); 291 ret = dsi_dual_connector_tile_init(connector, id);
@@ -328,11 +340,12 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
328 struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); 340 struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
329 struct mipi_dsi_host *host = msm_dsi->host; 341 struct mipi_dsi_host *host = msm_dsi->host;
330 struct drm_panel *panel = msm_dsi->panel; 342 struct drm_panel *panel = msm_dsi->panel;
331 bool is_dual_panel = IS_DUAL_PANEL(); 343 bool is_dual_dsi = IS_DUAL_DSI();
332 int ret; 344 int ret;
333 345
334 DBG("id=%d", id); 346 DBG("id=%d", id);
335 if (!panel || (is_dual_panel && (DSI_1 == id))) 347 if (!msm_dsi_device_connected(msm_dsi) ||
348 (is_dual_dsi && (DSI_1 == id)))
336 return; 349 return;
337 350
338 ret = msm_dsi_host_power_on(host); 351 ret = msm_dsi_host_power_on(host);
@@ -341,7 +354,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
341 goto host_on_fail; 354 goto host_on_fail;
342 } 355 }
343 356
344 if (is_dual_panel && msm_dsi1) { 357 if (is_dual_dsi && msm_dsi1) {
345 ret = msm_dsi_host_power_on(msm_dsi1->host); 358 ret = msm_dsi_host_power_on(msm_dsi1->host);
346 if (ret) { 359 if (ret) {
347 pr_err("%s: power on host1 failed, %d\n", 360 pr_err("%s: power on host1 failed, %d\n",
@@ -353,10 +366,13 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
353 /* Always call panel functions once, because even for dual panels, 366 /* Always call panel functions once, because even for dual panels,
354 * there is only one drm_panel instance. 367 * there is only one drm_panel instance.
355 */ 368 */
356 ret = drm_panel_prepare(panel); 369 if (panel) {
357 if (ret) { 370 ret = drm_panel_prepare(panel);
358 pr_err("%s: prepare panel %d failed, %d\n", __func__, id, ret); 371 if (ret) {
359 goto panel_prep_fail; 372 pr_err("%s: prepare panel %d failed, %d\n", __func__,
373 id, ret);
374 goto panel_prep_fail;
375 }
360 } 376 }
361 377
362 ret = msm_dsi_host_enable(host); 378 ret = msm_dsi_host_enable(host);
@@ -365,7 +381,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
365 goto host_en_fail; 381 goto host_en_fail;
366 } 382 }
367 383
368 if (is_dual_panel && msm_dsi1) { 384 if (is_dual_dsi && msm_dsi1) {
369 ret = msm_dsi_host_enable(msm_dsi1->host); 385 ret = msm_dsi_host_enable(msm_dsi1->host);
370 if (ret) { 386 if (ret) {
371 pr_err("%s: enable host1 failed, %d\n", __func__, ret); 387 pr_err("%s: enable host1 failed, %d\n", __func__, ret);
@@ -373,23 +389,27 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
373 } 389 }
374 } 390 }
375 391
376 ret = drm_panel_enable(panel); 392 if (panel) {
377 if (ret) { 393 ret = drm_panel_enable(panel);
378 pr_err("%s: enable panel %d failed, %d\n", __func__, id, ret); 394 if (ret) {
379 goto panel_en_fail; 395 pr_err("%s: enable panel %d failed, %d\n", __func__, id,
396 ret);
397 goto panel_en_fail;
398 }
380 } 399 }
381 400
382 return; 401 return;
383 402
384panel_en_fail: 403panel_en_fail:
385 if (is_dual_panel && msm_dsi1) 404 if (is_dual_dsi && msm_dsi1)
386 msm_dsi_host_disable(msm_dsi1->host); 405 msm_dsi_host_disable(msm_dsi1->host);
387host1_en_fail: 406host1_en_fail:
388 msm_dsi_host_disable(host); 407 msm_dsi_host_disable(host);
389host_en_fail: 408host_en_fail:
390 drm_panel_unprepare(panel); 409 if (panel)
410 drm_panel_unprepare(panel);
391panel_prep_fail: 411panel_prep_fail:
392 if (is_dual_panel && msm_dsi1) 412 if (is_dual_dsi && msm_dsi1)
393 msm_dsi_host_power_off(msm_dsi1->host); 413 msm_dsi_host_power_off(msm_dsi1->host);
394host1_on_fail: 414host1_on_fail:
395 msm_dsi_host_power_off(host); 415 msm_dsi_host_power_off(host);
@@ -414,37 +434,44 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
414 struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); 434 struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
415 struct mipi_dsi_host *host = msm_dsi->host; 435 struct mipi_dsi_host *host = msm_dsi->host;
416 struct drm_panel *panel = msm_dsi->panel; 436 struct drm_panel *panel = msm_dsi->panel;
417 bool is_dual_panel = IS_DUAL_PANEL(); 437 bool is_dual_dsi = IS_DUAL_DSI();
418 int ret; 438 int ret;
419 439
420 DBG("id=%d", id); 440 DBG("id=%d", id);
421 441
422 if (!panel || (is_dual_panel && (DSI_1 == id))) 442 if (!msm_dsi_device_connected(msm_dsi) ||
443 (is_dual_dsi && (DSI_1 == id)))
423 return; 444 return;
424 445
425 ret = drm_panel_disable(panel); 446 if (panel) {
426 if (ret) 447 ret = drm_panel_disable(panel);
427 pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, ret); 448 if (ret)
449 pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
450 ret);
451 }
428 452
429 ret = msm_dsi_host_disable(host); 453 ret = msm_dsi_host_disable(host);
430 if (ret) 454 if (ret)
431 pr_err("%s: host %d disable failed, %d\n", __func__, id, ret); 455 pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
432 456
433 if (is_dual_panel && msm_dsi1) { 457 if (is_dual_dsi && msm_dsi1) {
434 ret = msm_dsi_host_disable(msm_dsi1->host); 458 ret = msm_dsi_host_disable(msm_dsi1->host);
435 if (ret) 459 if (ret)
436 pr_err("%s: host1 disable failed, %d\n", __func__, ret); 460 pr_err("%s: host1 disable failed, %d\n", __func__, ret);
437 } 461 }
438 462
439 ret = drm_panel_unprepare(panel); 463 if (panel) {
440 if (ret) 464 ret = drm_panel_unprepare(panel);
441 pr_err("%s: Panel %d unprepare failed,%d\n", __func__, id, ret); 465 if (ret)
466 pr_err("%s: Panel %d unprepare failed,%d\n", __func__,
467 id, ret);
468 }
442 469
443 ret = msm_dsi_host_power_off(host); 470 ret = msm_dsi_host_power_off(host);
444 if (ret) 471 if (ret)
445 pr_err("%s: host %d power off failed,%d\n", __func__, id, ret); 472 pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
446 473
447 if (is_dual_panel && msm_dsi1) { 474 if (is_dual_dsi && msm_dsi1) {
448 ret = msm_dsi_host_power_off(msm_dsi1->host); 475 ret = msm_dsi_host_power_off(msm_dsi1->host);
449 if (ret) 476 if (ret)
450 pr_err("%s: host1 power off failed, %d\n", 477 pr_err("%s: host1 power off failed, %d\n",
@@ -460,7 +487,7 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
460 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 487 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
461 struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); 488 struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
462 struct mipi_dsi_host *host = msm_dsi->host; 489 struct mipi_dsi_host *host = msm_dsi->host;
463 bool is_dual_panel = IS_DUAL_PANEL(); 490 bool is_dual_dsi = IS_DUAL_DSI();
464 491
465 DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 492 DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
466 mode->base.id, mode->name, 493 mode->base.id, mode->name,
@@ -471,11 +498,11 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
471 mode->vsync_end, mode->vtotal, 498 mode->vsync_end, mode->vtotal,
472 mode->type, mode->flags); 499 mode->type, mode->flags);
473 500
474 if (is_dual_panel && (DSI_1 == id)) 501 if (is_dual_dsi && (DSI_1 == id))
475 return; 502 return;
476 503
477 msm_dsi_host_set_display_mode(host, adjusted_mode); 504 msm_dsi_host_set_display_mode(host, adjusted_mode);
478 if (is_dual_panel && other_dsi) 505 if (is_dual_dsi && other_dsi)
479 msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode); 506 msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
480} 507}
481 508
@@ -503,7 +530,7 @@ static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
503 .mode_set = dsi_mgr_bridge_mode_set, 530 .mode_set = dsi_mgr_bridge_mode_set,
504}; 531};
505 532
506/* initialize connector */ 533/* initialize connector when we're connected to a drm_panel */
507struct drm_connector *msm_dsi_manager_connector_init(u8 id) 534struct drm_connector *msm_dsi_manager_connector_init(u8 id)
508{ 535{
509 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 536 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
@@ -588,6 +615,53 @@ fail:
588 return ERR_PTR(ret); 615 return ERR_PTR(ret);
589} 616}
590 617
618struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
619{
620 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
621 struct drm_device *dev = msm_dsi->dev;
622 struct drm_encoder *encoder;
623 struct drm_bridge *int_bridge, *ext_bridge;
624 struct drm_connector *connector;
625 struct list_head *connector_list;
626
627 int_bridge = msm_dsi->bridge;
628 ext_bridge = msm_dsi->external_bridge =
629 msm_dsi_host_get_bridge(msm_dsi->host);
630
631 /*
632 * HACK: we may not know the external DSI bridge device's mode
633 * flags here. We'll get to know them only when the device
634 * attaches to the dsi host. For now, assume the bridge supports
635 * DSI video mode
636 */
637 encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID];
638
639 /* link the internal dsi bridge to the external bridge */
640 int_bridge->next = ext_bridge;
641 /* set the external bridge's encoder as dsi's encoder */
642 ext_bridge->encoder = encoder;
643
644 drm_bridge_attach(dev, ext_bridge);
645
646 /*
647 * We need the drm_connector created by the external bridge
648 * driver (or someone else) so we can add it to our driver's
649 * priv->connector[] list, mainly for msm_fbdev_init()
650 */
651 connector_list = &dev->mode_config.connector_list;
652
653 list_for_each_entry(connector, connector_list, head) {
654 int i;
655
656 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
657 if (connector->encoder_ids[i] == encoder->base.id)
658 return connector;
659 }
660 }
661
662 return ERR_PTR(-ENODEV);
663}
664
591void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge) 665void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
592{ 666{
593} 667}
@@ -598,12 +672,29 @@ int msm_dsi_manager_phy_enable(int id,
598{ 672{
599 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 673 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
600 struct msm_dsi_phy *phy = msm_dsi->phy; 674 struct msm_dsi_phy *phy = msm_dsi->phy;
675 int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id;
676 struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
601 int ret; 677 int ret;
602 678
603 ret = msm_dsi_phy_enable(phy, IS_DUAL_PANEL(), bit_rate, esc_rate); 679 ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate);
604 if (ret) 680 if (ret)
605 return ret; 681 return ret;
606 682
683 /*
684	 * Resetting the DSI PHY silently changes its PLL registers to their
685	 * reset state, which confuses the clock driver and results in wrong
686	 * link clock output rates. Restore the PLL state if this PHY's PLL
687	 * is being used as a clock source.
688 */
689 if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) {
690 ret = msm_dsi_pll_restore_state(pll);
691 if (ret) {
692 pr_err("%s: failed to restore pll state\n", __func__);
693 msm_dsi_phy_disable(phy);
694 return ret;
695 }
696 }
697
607 msm_dsi->phy_enabled = true; 698 msm_dsi->phy_enabled = true;
608 msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post); 699 msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post);
609 700
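
The restore call in the hunk above exists because the PHY enable sequence resets the PHY block, and that reset rewinds the PLL registers without the common clock framework noticing. A condensed sketch of the pairing, using the helpers from this patch (the wrapper function itself is invented for illustration):

/* Hypothetical wrapper: enable a PHY whose PLL sources the link
 * clocks, then put back the PLL state the reset just clobbered.
 */
static int dsi_phy_enable_with_pll(struct msm_dsi_phy *phy,
				   struct msm_dsi_pll *pll, int src_pll_id,
				   unsigned long bit_rate,
				   unsigned long esc_rate)
{
	int ret;

	ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate);
	if (ret)
		return ret;

	/* The PHY reset left PLL defaults behind; restore the saved
	 * rate/dividers so the clk framework's view stays correct.
	 */
	ret = msm_dsi_pll_restore_state(pll);
	if (ret)
		msm_dsi_phy_disable(phy);	/* don't leak a half-on PHY */

	return ret;
}
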
@@ -616,13 +707,18 @@ void msm_dsi_manager_phy_disable(int id)
616 struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); 707 struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
617 struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); 708 struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
618 struct msm_dsi_phy *phy = msm_dsi->phy; 709 struct msm_dsi_phy *phy = msm_dsi->phy;
710 struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
711
712 /* Save PLL status if it is a clock source */
713 if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER))
714 msm_dsi_pll_save_state(pll);
619 715
620 /* disable DSI phy 716 /* disable DSI phy
621 * In dual-dsi configuration, the phy should be disabled for the 717 * In dual-dsi configuration, the phy should be disabled for the
622 * first controller only when the second controller is disabled. 718 * first controller only when the second controller is disabled.
623 */ 719 */
624 msm_dsi->phy_enabled = false; 720 msm_dsi->phy_enabled = false;
625 if (IS_DUAL_PANEL() && mdsi && sdsi) { 721 if (IS_DUAL_DSI() && mdsi && sdsi) {
626 if (!mdsi->phy_enabled && !sdsi->phy_enabled) { 722 if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
627 msm_dsi_phy_disable(sdsi->phy); 723 msm_dsi_phy_disable(sdsi->phy);
628 msm_dsi_phy_disable(mdsi->phy); 724 msm_dsi_phy_disable(mdsi->phy);
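
The comment in this hunk is the key ordering constraint for dual DSI: both controllers share the master's PLL, so neither PHY may be powered off until both controllers are down, and the clock master must be the last one turned off. In outline (names from the hunk; the single-DSI branch is elided here):

msm_dsi->phy_enabled = false;
if (IS_DUAL_DSI() && mdsi && sdsi) {
	/* wait for the second controller before touching either PHY */
	if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
		msm_dsi_phy_disable(sdsi->phy);	/* slave first */
		msm_dsi_phy_disable(mdsi->phy);	/* clock master last */
	}
}
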
@@ -713,9 +809,9 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
713 809
714 msm_dsim->dsi[id] = msm_dsi; 810 msm_dsim->dsi[id] = msm_dsi;
715 811
716 ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id); 812 ret = dsi_mgr_parse_dual_dsi(msm_dsi->pdev->dev.of_node, id);
717 if (ret) { 813 if (ret) {
718 pr_err("%s: failed to parse dual panel info\n", __func__); 814 pr_err("%s: failed to parse dual DSI info\n", __func__);
719 goto fail; 815 goto fail;
720 } 816 }
721 817
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 728152f3ef48..5de505e627be 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23) 21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
22 22
23Copyright (C) 2013-2014 by the following authors: 23Copyright (C) 2013-2015 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 24- Rob Clark <robdclark@gmail.com> (robclark)
25 25
26Permission is hereby granted, free of charge, to any person obtaining 26Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 2d3b33ce1cc5..401ff58d6893 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -12,142 +12,8 @@
12 */ 12 */
13 13
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/regulator/consumer.h>
16 15
17#include "dsi.h" 16#include "dsi_phy.h"
18#include "dsi.xml.h"
19
20#define dsi_phy_read(offset) msm_readl((offset))
21#define dsi_phy_write(offset, data) msm_writel((data), (offset))
22
23struct dsi_phy_ops {
24 int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel,
25 const unsigned long bit_rate, const unsigned long esc_rate);
26 int (*disable)(struct msm_dsi_phy *phy);
27};
28
29struct dsi_phy_cfg {
30 enum msm_dsi_phy_type type;
31 struct dsi_reg_config reg_cfg;
32 struct dsi_phy_ops ops;
33};
34
35struct dsi_dphy_timing {
36 u32 clk_pre;
37 u32 clk_post;
38 u32 clk_zero;
39 u32 clk_trail;
40 u32 clk_prepare;
41 u32 hs_exit;
42 u32 hs_zero;
43 u32 hs_prepare;
44 u32 hs_trail;
45 u32 hs_rqst;
46 u32 ta_go;
47 u32 ta_sure;
48 u32 ta_get;
49};
50
51struct msm_dsi_phy {
52 struct platform_device *pdev;
53 void __iomem *base;
54 void __iomem *reg_base;
55 int id;
56
57 struct clk *ahb_clk;
58 struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
59
60 struct dsi_dphy_timing timing;
61 const struct dsi_phy_cfg *cfg;
62
63 struct msm_dsi_pll *pll;
64};
65
66static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
67{
68 struct regulator_bulk_data *s = phy->supplies;
69 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
70 struct device *dev = &phy->pdev->dev;
71 int num = phy->cfg->reg_cfg.num;
72 int i, ret;
73
74 for (i = 0; i < num; i++)
75 s[i].supply = regs[i].name;
76
77 ret = devm_regulator_bulk_get(&phy->pdev->dev, num, s);
78 if (ret < 0) {
79 dev_err(dev, "%s: failed to init regulator, ret=%d\n",
80 __func__, ret);
81 return ret;
82 }
83
84 for (i = 0; i < num; i++) {
85 if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
86 ret = regulator_set_voltage(s[i].consumer,
87 regs[i].min_voltage, regs[i].max_voltage);
88 if (ret < 0) {
89 dev_err(dev,
90 "regulator %d set voltage failed, %d\n",
91 i, ret);
92 return ret;
93 }
94 }
95 }
96
97 return 0;
98}
99
100static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
101{
102 struct regulator_bulk_data *s = phy->supplies;
103 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
104 int num = phy->cfg->reg_cfg.num;
105 int i;
106
107 DBG("");
108 for (i = num - 1; i >= 0; i--)
109 if (regs[i].disable_load >= 0)
110 regulator_set_load(s[i].consumer,
111 regs[i].disable_load);
112
113 regulator_bulk_disable(num, s);
114}
115
116static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
117{
118 struct regulator_bulk_data *s = phy->supplies;
119 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
120 struct device *dev = &phy->pdev->dev;
121 int num = phy->cfg->reg_cfg.num;
122 int ret, i;
123
124 DBG("");
125 for (i = 0; i < num; i++) {
126 if (regs[i].enable_load >= 0) {
127 ret = regulator_set_load(s[i].consumer,
128 regs[i].enable_load);
129 if (ret < 0) {
130 dev_err(dev,
131 "regulator %d set op mode failed, %d\n",
132 i, ret);
133 goto fail;
134 }
135 }
136 }
137
138 ret = regulator_bulk_enable(num, s);
139 if (ret < 0) {
140 dev_err(dev, "regulator enable failed, %d\n", ret);
141 goto fail;
142 }
143
144 return 0;
145
146fail:
147 for (i--; i >= 0; i--)
148 regulator_set_load(s[i].consumer, regs[i].disable_load);
149 return ret;
150}
151 17
152#define S_DIV_ROUND_UP(n, d) \ 18#define S_DIV_ROUND_UP(n, d) \
153 (((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d))) 19 (((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
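
S_DIV_ROUND_UP() is a signed take on DIV_ROUND_UP(): C division truncates toward zero, so the negative branch biases by -(d - 1) instead, and the quotient is rounded away from zero for either sign. A couple of worked cases:

/* With d = 4 (C99 division truncates toward zero):
 *   S_DIV_ROUND_UP( 7, 4) == ( 7 + 3) / 4 ==  10 / 4 ==  2
 *   S_DIV_ROUND_UP( 8, 4) == ( 8 + 3) / 4 ==  11 / 4 ==  2
 *   S_DIV_ROUND_UP(-7, 4) == (-7 - 3) / 4 == -10 / 4 == -2
 * i.e. 1.75 -> 2, 2.0 -> 2, -1.75 -> -2: rounded away from zero.
 */
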
@@ -156,6 +22,7 @@ static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
156 s32 min_result, bool even) 22 s32 min_result, bool even)
157{ 23{
158 s32 v; 24 s32 v;
25
159 v = (tmax - tmin) * percent; 26 v = (tmax - tmin) * percent;
160 v = S_DIV_ROUND_UP(v, 100) + tmin; 27 v = S_DIV_ROUND_UP(v, 100) + tmin;
161 if (even && (v & 0x1)) 28 if (even && (v & 0x1))
@@ -164,7 +31,7 @@ static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
164 return max_t(s32, min_result, v); 31 return max_t(s32, min_result, v);
165} 32}
166 33
167static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing, 34static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
168 s32 ui, s32 coeff, s32 pcnt) 35 s32 ui, s32 coeff, s32 pcnt)
169{ 36{
170 s32 tmax, tmin, clk_z; 37 s32 tmax, tmin, clk_z;
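
linear_inter() picks the point `percent` of the way from tmin toward tmax, rounds the offset up, optionally knocks the result down to even (several D-PHY fields are programmed as value/2), and clamps to min_result. One worked call:

/* linear_inter(tmax = 60, tmin = 20, percent = 10,
 *              min_result = 0, even = true):
 *   v = (60 - 20) * 10                 = 400
 *   v = S_DIV_ROUND_UP(400, 100) + 20  = 4 + 20 = 24
 *   24 is already even, and max(0, 24) = 24
 * With percent = 12 instead: v = 5 + 20 = 25, which is odd, so the
 * even adjustment drops it by one to 24 before clamping.
 */
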
@@ -186,7 +53,7 @@ static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
186 timing->clk_zero = clk_z + 8 - temp; 53 timing->clk_zero = clk_z + 8 - temp;
187} 54}
188 55
189static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing, 56int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
190 const unsigned long bit_rate, const unsigned long esc_rate) 57 const unsigned long bit_rate, const unsigned long esc_rate)
191{ 58{
192 s32 ui, lpx; 59 s32 ui, lpx;
@@ -256,9 +123,8 @@ static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
256 temp += 8 * ui + lpx; 123 temp += 8 * ui + lpx;
257 tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; 124 tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
258 if (tmin > tmax) { 125 if (tmin > tmax) {
259 temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false) >> 1; 126 temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
260 timing->clk_pre = temp >> 1; 127 timing->clk_pre = temp >> 1;
261 temp = (2 * tmax - tmin) * pcnt2;
262 } else { 128 } else {
263 timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false); 129 timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
264 } 130 }
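
Worth noting in the hunk above: besides the rename, this fixes the clk_pre fallback path. The old code halved the interpolated value twice, once via the `>> 1` on the linear_inter() result and again when storing it, and left behind a dead `temp = (2 * tmax - tmin) * pcnt2` assignment, so clk_pre came out a quarter of the intended midpoint. The fixed form halves exactly once:

if (tmin > tmax) {
	/* interpolate over the doubled range, then halve once */
	temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
	timing->clk_pre = temp >> 1;
} else {
	timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
}
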
@@ -276,130 +142,119 @@ static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
276 return 0; 142 return 0;
277} 143}
278 144
279static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) 145void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
146 u32 bit_mask)
280{ 147{
281 void __iomem *base = phy->reg_base; 148 int phy_id = phy->id;
149 u32 val;
282 150
283 if (!enable) { 151 if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
284 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
285 return; 152 return;
286 }
287 153
288 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0); 154 val = dsi_phy_read(phy->base + reg);
289 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1); 155
290 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0); 156 if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
291 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0); 157 dsi_phy_write(phy->base + reg, val | bit_mask);
292 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3); 158 else
293 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9); 159 dsi_phy_write(phy->base + reg, val & (~bit_mask));
294 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
295 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
296} 160}
297 161
298static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel, 162static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
299 const unsigned long bit_rate, const unsigned long esc_rate)
300{ 163{
301 struct dsi_dphy_timing *timing = &phy->timing; 164 struct regulator_bulk_data *s = phy->supplies;
302 int i; 165 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
303 void __iomem *base = phy->base; 166 struct device *dev = &phy->pdev->dev;
167 int num = phy->cfg->reg_cfg.num;
168 int i, ret;
304 169
305 DBG(""); 170 for (i = 0; i < num; i++)
171 s[i].supply = regs[i].name;
306 172
307 if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { 173 ret = devm_regulator_bulk_get(dev, num, s);
308 pr_err("%s: D-PHY timing calculation failed\n", __func__); 174 if (ret < 0) {
309 return -EINVAL; 175 dev_err(dev, "%s: failed to init regulator, ret=%d\n",
176 __func__, ret);
177 return ret;
310 } 178 }
311 179
312 dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff); 180 for (i = 0; i < num; i++) {
313 181 if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
314 dsi_28nm_phy_regulator_ctrl(phy, true); 182 ret = regulator_set_voltage(s[i].consumer,
315 183 regs[i].min_voltage, regs[i].max_voltage);
316 dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00); 184 if (ret < 0) {
317 185 dev_err(dev,
318 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0, 186 "regulator %d set voltage failed, %d\n",
319 DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero)); 187 i, ret);
320 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1, 188 return ret;
321 DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail)); 189 }
322 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2, 190 }
323 DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
324 if (timing->clk_zero & BIT(8))
325 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
326 DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
327 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
328 DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
329 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
330 DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
331 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
332 DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
333 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
334 DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
335 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
336 DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
337 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
338 DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
339 DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
340 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
341 DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
342 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
343 DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
344
345 dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
346 dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
347
348 dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
349
350 for (i = 0; i < 4; i++) {
351 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
352 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
353 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
354 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
355 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
356 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
357 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
358 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
359 } 191 }
360 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
361 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
362 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
363 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
364 192
365 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0); 193 return 0;
366 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1); 194}
367 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
368 195
369 dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f); 196static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
197{
198 struct regulator_bulk_data *s = phy->supplies;
199 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
200 int num = phy->cfg->reg_cfg.num;
201 int i;
370 202
371 if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER)) 203 DBG("");
372 dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00); 204 for (i = num - 1; i >= 0; i--)
373 else 205 if (regs[i].disable_load >= 0)
374 dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01); 206 regulator_set_load(s[i].consumer, regs[i].disable_load);
375 207
376 return 0; 208 regulator_bulk_disable(num, s);
377} 209}
378 210
379static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy) 211static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
380{ 212{
381 dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0); 213 struct regulator_bulk_data *s = phy->supplies;
382 dsi_28nm_phy_regulator_ctrl(phy, false); 214 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
215 struct device *dev = &phy->pdev->dev;
216 int num = phy->cfg->reg_cfg.num;
217 int ret, i;
383 218
384 /* 219 DBG("");
 385 * Wait for the register writes to complete in order to 220 for (i = 0; i < num; i++) {
386 * ensure that the phy is completely disabled 221 if (regs[i].enable_load >= 0) {
387 */ 222 ret = regulator_set_load(s[i].consumer,
388 wmb(); 223 regs[i].enable_load);
224 if (ret < 0) {
225 dev_err(dev,
226 "regulator %d set op mode failed, %d\n",
227 i, ret);
228 goto fail;
229 }
230 }
231 }
232
233 ret = regulator_bulk_enable(num, s);
234 if (ret < 0) {
235 dev_err(dev, "regulator enable failed, %d\n", ret);
236 goto fail;
237 }
389 238
390 return 0; 239 return 0;
240
241fail:
242 for (i--; i >= 0; i--)
243 regulator_set_load(s[i].consumer, regs[i].disable_load);
244 return ret;
391} 245}
392 246
393static int dsi_phy_enable_resource(struct msm_dsi_phy *phy) 247static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
394{ 248{
249 struct device *dev = &phy->pdev->dev;
395 int ret; 250 int ret;
396 251
397 pm_runtime_get_sync(&phy->pdev->dev); 252 pm_runtime_get_sync(dev);
398 253
399 ret = clk_prepare_enable(phy->ahb_clk); 254 ret = clk_prepare_enable(phy->ahb_clk);
400 if (ret) { 255 if (ret) {
401 pr_err("%s: can't enable ahb clk, %d\n", __func__, ret); 256 dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
402 pm_runtime_put_sync(&phy->pdev->dev); 257 pm_runtime_put_sync(dev);
403 } 258 }
404 259
405 return ret; 260 return ret;
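
msm_dsi_phy_set_src_pll() reduces bit-clock source selection to one read-modify-write keyed off the per-generation table: src_pll_truthtable[phy_id][pll_id] says whether this PHY sets the select bit when fed from that PLL. Reading the 28nm HPM table defined later in this patch (and assuming DSI_CLOCK_MASTER is DSI_0, as the manager code implies):

/* dsi_phy_28nm_hpm_cfgs.src_pll_truthtable:
 *
 *              pll_id 0   pll_id 1
 *   phy_id 0     true       true    -> PHY0 always sets the bit
 *   phy_id 1     false      true    -> PHY1 sets it only for PLL1
 *
 * So in dual-DSI mode (src_pll_id == DSI_CLOCK_MASTER == 0), PHY1
 * clears BITCLK_HS_SEL and is presumably clocked from the master
 * PHY's PLL instead of its own.
 */
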
@@ -411,92 +266,74 @@ static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
411 pm_runtime_put_sync(&phy->pdev->dev); 266 pm_runtime_put_sync(&phy->pdev->dev);
412} 267}
413 268
414static const struct dsi_phy_cfg dsi_phy_cfgs[MSM_DSI_PHY_MAX] = {
415 [MSM_DSI_PHY_28NM_HPM] = {
416 .type = MSM_DSI_PHY_28NM_HPM,
417 .reg_cfg = {
418 .num = 1,
419 .regs = {
420 {"vddio", 1800000, 1800000, 100000, 100},
421 },
422 },
423 .ops = {
424 .enable = dsi_28nm_phy_enable,
425 .disable = dsi_28nm_phy_disable,
426 }
427 },
428 [MSM_DSI_PHY_28NM_LP] = {
429 .type = MSM_DSI_PHY_28NM_LP,
430 .reg_cfg = {
431 .num = 1,
432 .regs = {
433 {"vddio", 1800000, 1800000, 100000, 100},
434 },
435 },
436 .ops = {
437 .enable = dsi_28nm_phy_enable,
438 .disable = dsi_28nm_phy_disable,
439 }
440 },
441};
442
443static const struct of_device_id dsi_phy_dt_match[] = { 269static const struct of_device_id dsi_phy_dt_match[] = {
270#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
444 { .compatible = "qcom,dsi-phy-28nm-hpm", 271 { .compatible = "qcom,dsi-phy-28nm-hpm",
445 .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_HPM],}, 272 .data = &dsi_phy_28nm_hpm_cfgs },
446 { .compatible = "qcom,dsi-phy-28nm-lp", 273 { .compatible = "qcom,dsi-phy-28nm-lp",
447 .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_LP],}, 274 .data = &dsi_phy_28nm_lp_cfgs },
275#endif
276#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
277 { .compatible = "qcom,dsi-phy-20nm",
278 .data = &dsi_phy_20nm_cfgs },
279#endif
448 {} 280 {}
449}; 281};
450 282
451static int dsi_phy_driver_probe(struct platform_device *pdev) 283static int dsi_phy_driver_probe(struct platform_device *pdev)
452{ 284{
453 struct msm_dsi_phy *phy; 285 struct msm_dsi_phy *phy;
286 struct device *dev = &pdev->dev;
454 const struct of_device_id *match; 287 const struct of_device_id *match;
455 int ret; 288 int ret;
456 289
457 phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL); 290 phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
458 if (!phy) 291 if (!phy)
459 return -ENOMEM; 292 return -ENOMEM;
460 293
461 match = of_match_node(dsi_phy_dt_match, pdev->dev.of_node); 294 match = of_match_node(dsi_phy_dt_match, dev->of_node);
462 if (!match) 295 if (!match)
463 return -ENODEV; 296 return -ENODEV;
464 297
465 phy->cfg = match->data; 298 phy->cfg = match->data;
466 phy->pdev = pdev; 299 phy->pdev = pdev;
467 300
468 ret = of_property_read_u32(pdev->dev.of_node, 301 ret = of_property_read_u32(dev->of_node,
469 "qcom,dsi-phy-index", &phy->id); 302 "qcom,dsi-phy-index", &phy->id);
470 if (ret) { 303 if (ret) {
471 dev_err(&pdev->dev, 304 dev_err(dev, "%s: PHY index not specified, %d\n",
472 "%s: PHY index not specified, ret=%d\n",
473 __func__, ret); 305 __func__, ret);
474 goto fail; 306 goto fail;
475 } 307 }
476 308
309 phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
310 "qcom,dsi-phy-regulator-ldo-mode");
311
477 phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY"); 312 phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
478 if (IS_ERR(phy->base)) { 313 if (IS_ERR(phy->base)) {
479 dev_err(&pdev->dev, "%s: failed to map phy base\n", __func__); 314 dev_err(dev, "%s: failed to map phy base\n", __func__);
480 ret = -ENOMEM; 315 ret = -ENOMEM;
481 goto fail; 316 goto fail;
482 } 317 }
483 phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG"); 318
319 phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
320 "DSI_PHY_REG");
484 if (IS_ERR(phy->reg_base)) { 321 if (IS_ERR(phy->reg_base)) {
485 dev_err(&pdev->dev, 322 dev_err(dev, "%s: failed to map phy regulator base\n",
486 "%s: failed to map phy regulator base\n", __func__); 323 __func__);
487 ret = -ENOMEM; 324 ret = -ENOMEM;
488 goto fail; 325 goto fail;
489 } 326 }
490 327
491 ret = dsi_phy_regulator_init(phy); 328 ret = dsi_phy_regulator_init(phy);
492 if (ret) { 329 if (ret) {
493 dev_err(&pdev->dev, "%s: failed to init regulator\n", __func__); 330 dev_err(dev, "%s: failed to init regulator\n", __func__);
494 goto fail; 331 goto fail;
495 } 332 }
496 333
497 phy->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk"); 334 phy->ahb_clk = devm_clk_get(dev, "iface_clk");
498 if (IS_ERR(phy->ahb_clk)) { 335 if (IS_ERR(phy->ahb_clk)) {
499 pr_err("%s: Unable to get ahb clk\n", __func__); 336 dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
500 ret = PTR_ERR(phy->ahb_clk); 337 ret = PTR_ERR(phy->ahb_clk);
501 goto fail; 338 goto fail;
502 } 339 }
@@ -510,7 +347,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
510 347
511 phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id); 348 phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
512 if (!phy->pll) 349 if (!phy->pll)
513 dev_info(&pdev->dev, 350 dev_info(dev,
514 "%s: pll init failed, need separate pll clk driver\n", 351 "%s: pll init failed, need separate pll clk driver\n",
515 __func__); 352 __func__);
516 353
@@ -557,9 +394,10 @@ void __exit msm_dsi_phy_driver_unregister(void)
557 platform_driver_unregister(&dsi_phy_platform_driver); 394 platform_driver_unregister(&dsi_phy_platform_driver);
558} 395}
559 396
560int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel, 397int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
561 const unsigned long bit_rate, const unsigned long esc_rate) 398 const unsigned long bit_rate, const unsigned long esc_rate)
562{ 399{
400 struct device *dev = &phy->pdev->dev;
563 int ret; 401 int ret;
564 402
565 if (!phy || !phy->cfg->ops.enable) 403 if (!phy || !phy->cfg->ops.enable)
@@ -567,30 +405,37 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
567 405
568 ret = dsi_phy_regulator_enable(phy); 406 ret = dsi_phy_regulator_enable(phy);
569 if (ret) { 407 if (ret) {
570 dev_err(&phy->pdev->dev, "%s: regulator enable failed, %d\n", 408 dev_err(dev, "%s: regulator enable failed, %d\n",
571 __func__, ret); 409 __func__, ret);
572 return ret; 410 return ret;
573 } 411 }
574 412
575 return phy->cfg->ops.enable(phy, is_dual_panel, bit_rate, esc_rate); 413 ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate);
414 if (ret) {
415 dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
416 dsi_phy_regulator_disable(phy);
417 return ret;
418 }
419
420 return 0;
576} 421}
577 422
578int msm_dsi_phy_disable(struct msm_dsi_phy *phy) 423void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
579{ 424{
580 if (!phy || !phy->cfg->ops.disable) 425 if (!phy || !phy->cfg->ops.disable)
581 return -EINVAL; 426 return;
582 427
583 phy->cfg->ops.disable(phy); 428 phy->cfg->ops.disable(phy);
584 dsi_phy_regulator_disable(phy);
585 429
586 return 0; 430 dsi_phy_regulator_disable(phy);
587} 431}
588 432
589void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, 433void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
590 u32 *clk_pre, u32 *clk_post) 434 u32 *clk_pre, u32 *clk_post)
591{ 435{
592 if (!phy) 436 if (!phy)
593 return; 437 return;
438
594 if (clk_pre) 439 if (clk_pre)
595 *clk_pre = phy->timing.clk_pre; 440 *clk_pre = phy->timing.clk_pre;
596 if (clk_post) 441 if (clk_post)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
new file mode 100644
index 000000000000..0456b253239f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -0,0 +1,89 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef __DSI_PHY_H__
15#define __DSI_PHY_H__
16
17#include <linux/regulator/consumer.h>
18
19#include "dsi.h"
20
21#define dsi_phy_read(offset) msm_readl((offset))
22#define dsi_phy_write(offset, data) msm_writel((data), (offset))
23
24struct msm_dsi_phy_ops {
25 int (*enable)(struct msm_dsi_phy *phy, int src_pll_id,
26 const unsigned long bit_rate, const unsigned long esc_rate);
27 void (*disable)(struct msm_dsi_phy *phy);
28};
29
30struct msm_dsi_phy_cfg {
31 enum msm_dsi_phy_type type;
32 struct dsi_reg_config reg_cfg;
33 struct msm_dsi_phy_ops ops;
34
35 /*
36 * Each cell {phy_id, pll_id} of the truth table indicates
37 * if the source PLL selection bit should be set for each PHY.
38 * Fill default H/W values in illegal cells, eg. cell {0, 1}.
39 */
40 bool src_pll_truthtable[DSI_MAX][DSI_MAX];
41};
42
43extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
44extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
45extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
46
47struct msm_dsi_dphy_timing {
48 u32 clk_pre;
49 u32 clk_post;
50 u32 clk_zero;
51 u32 clk_trail;
52 u32 clk_prepare;
53 u32 hs_exit;
54 u32 hs_zero;
55 u32 hs_prepare;
56 u32 hs_trail;
57 u32 hs_rqst;
58 u32 ta_go;
59 u32 ta_sure;
60 u32 ta_get;
61};
62
63struct msm_dsi_phy {
64 struct platform_device *pdev;
65 void __iomem *base;
66 void __iomem *reg_base;
67 int id;
68
69 struct clk *ahb_clk;
70 struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
71
72 struct msm_dsi_dphy_timing timing;
73 const struct msm_dsi_phy_cfg *cfg;
74
75 bool regulator_ldo_mode;
76
77 struct msm_dsi_pll *pll;
78};
79
80/*
81 * PHY internal functions
82 */
83int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
84 const unsigned long bit_rate, const unsigned long esc_rate);
85void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
86 u32 bit_mask);
87
88#endif /* __DSI_PHY_H__ */
89
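
With dsi_phy.h in place the core file is generation-agnostic: each process node only supplies a msm_dsi_phy_cfg and a compatible entry. A hypothetical sketch of what adding another generation would look like; none of the 14nm names below exist in this patch, they are invented to show the shape:

/* Hypothetical: all identifiers containing "14nm" are illustrative. */
static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
		const unsigned long bit_rate, const unsigned long esc_rate);
static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy);

const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
	.type = MSM_DSI_PHY_14NM,	/* assumed enum value */
	.src_pll_truthtable = { {true, true}, {false, true} },
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vcca", 925000, 925000, 18000, 50},
		},
	},
	.ops = {
		.enable = dsi_14nm_phy_enable,
		.disable = dsi_14nm_phy_disable,
	},
};

/* plus one entry in dsi_phy_dt_match[]:
 *   { .compatible = "qcom,dsi-phy-14nm", .data = &dsi_phy_14nm_cfgs },
 */
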
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
new file mode 100644
index 000000000000..2e9ba118d50a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -0,0 +1,150 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "dsi_phy.h"
15#include "dsi.xml.h"
16
17static void dsi_20nm_dphy_set_timing(struct msm_dsi_phy *phy,
18 struct msm_dsi_dphy_timing *timing)
19{
20 void __iomem *base = phy->base;
21
22 dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_0,
23 DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
24 dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_1,
25 DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
26 dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_2,
27 DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
28 if (timing->clk_zero & BIT(8))
29 dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_3,
30 DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
31 dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_4,
32 DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
33 dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_5,
34 DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
35 dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_6,
36 DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
37 dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_7,
38 DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
39 dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_8,
40 DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
41 dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_9,
42 DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
43 DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
44 dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_10,
45 DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
46 dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_11,
47 DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
48}
49
50static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
51{
52 void __iomem *base = phy->reg_base;
53
54 if (!enable) {
55 dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
56 return;
57 }
58
59 if (phy->regulator_ldo_mode) {
60 dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x1d);
61 return;
62 }
63
64 /* non LDO mode */
65 dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_1, 0x03);
66 dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_2, 0x03);
67 dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_3, 0x00);
68 dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_4, 0x20);
69 dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0x01);
70 dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x00);
71 dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0, 0x03);
72}
73
74static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
75 const unsigned long bit_rate, const unsigned long esc_rate)
76{
77 struct msm_dsi_dphy_timing *timing = &phy->timing;
78 int i;
79 void __iomem *base = phy->base;
80 u32 cfg_4[4] = {0x20, 0x40, 0x20, 0x00};
81
82 DBG("");
83
84 if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
85 dev_err(&phy->pdev->dev,
86 "%s: D-PHY timing calculation failed\n", __func__);
87 return -EINVAL;
88 }
89
90 dsi_20nm_phy_regulator_ctrl(phy, true);
91
92 dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_0, 0xff);
93
94 msm_dsi_phy_set_src_pll(phy, src_pll_id,
95 REG_DSI_20nm_PHY_GLBL_TEST_CTRL,
96 DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
97
98 for (i = 0; i < 4; i++) {
99 dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_3(i),
100 (i >> 1) * 0x40);
101 dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_0(i), 0x01);
102 dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_1(i), 0x46);
103 dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_0(i), 0x02);
104 dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_1(i), 0xa0);
105 dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_4(i), cfg_4[i]);
106 }
107
108 dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_3, 0x80);
109 dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR0, 0x01);
110 dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR1, 0x46);
111 dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_0, 0x00);
112 dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_1, 0xa0);
113 dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_2, 0x00);
114 dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_4, 0x00);
115
116 dsi_20nm_dphy_set_timing(phy, timing);
117
118 dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_1, 0x00);
119
120 dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_1, 0x06);
121
122 /* make sure everything is written before enable */
123 wmb();
124 dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_0, 0x7f);
125
126 return 0;
127}
128
129static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
130{
131 dsi_phy_write(phy->base + REG_DSI_20nm_PHY_CTRL_0, 0);
132 dsi_20nm_phy_regulator_ctrl(phy, false);
133}
134
135const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
136 .type = MSM_DSI_PHY_20NM,
137 .src_pll_truthtable = { {false, true}, {false, true} },
138 .reg_cfg = {
139 .num = 2,
140 .regs = {
141 {"vddio", 1800000, 1800000, 100000, 100},
142 {"vcca", 1000000, 1000000, 10000, 100},
143 },
144 },
145 .ops = {
146 .enable = dsi_20nm_phy_enable,
147 .disable = dsi_20nm_phy_disable,
148 }
149};
150
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
new file mode 100644
index 000000000000..f1a7c7b46420
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -0,0 +1,166 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "dsi_phy.h"
15#include "dsi.xml.h"
16
17static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
18 struct msm_dsi_dphy_timing *timing)
19{
20 void __iomem *base = phy->base;
21
22 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
23 DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
24 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
25 DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
26 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
27 DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
28 if (timing->clk_zero & BIT(8))
29 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
30 DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
31 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
32 DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
33 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
34 DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
35 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
36 DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
37 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
38 DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
39 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
40 DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
41 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
42 DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
43 DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
44 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
45 DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
46 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
47 DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
48}
49
50static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
51{
52 void __iomem *base = phy->reg_base;
53
54 if (!enable) {
55 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
56 return;
57 }
58
59 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
60 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
61 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
62 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
63 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
64 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
65 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
66 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
67}
68
69static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
70 const unsigned long bit_rate, const unsigned long esc_rate)
71{
72 struct msm_dsi_dphy_timing *timing = &phy->timing;
73 int i;
74 void __iomem *base = phy->base;
75
76 DBG("");
77
78 if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
79 dev_err(&phy->pdev->dev,
80 "%s: D-PHY timing calculation failed\n", __func__);
81 return -EINVAL;
82 }
83
84 dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
85
86 dsi_28nm_phy_regulator_ctrl(phy, true);
87
88 dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
89
90 dsi_28nm_dphy_set_timing(phy, timing);
91
92 dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
93 dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
94
95 dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
96
97 for (i = 0; i < 4; i++) {
98 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
99 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
100 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
101 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
102 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
103 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
104 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
105 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
106 }
107 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
108 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
109 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
110 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
111
112 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
113 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
114 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
115
116 dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
117
118 msm_dsi_phy_set_src_pll(phy, src_pll_id,
119 REG_DSI_28nm_PHY_GLBL_TEST_CTRL,
120 DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
121
122 return 0;
123}
124
125static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
126{
127 dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
128 dsi_28nm_phy_regulator_ctrl(phy, false);
129
130 /*
 131 * Wait for the register writes to complete in order to
132 * ensure that the phy is completely disabled
133 */
134 wmb();
135}
136
137const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
138 .type = MSM_DSI_PHY_28NM_HPM,
139 .src_pll_truthtable = { {true, true}, {false, true} },
140 .reg_cfg = {
141 .num = 1,
142 .regs = {
143 {"vddio", 1800000, 1800000, 100000, 100},
144 },
145 },
146 .ops = {
147 .enable = dsi_28nm_phy_enable,
148 .disable = dsi_28nm_phy_disable,
149 },
150};
151
152const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
153 .type = MSM_DSI_PHY_28NM_LP,
154 .src_pll_truthtable = { {true, true}, {true, true} },
155 .reg_cfg = {
156 .num = 1,
157 .regs = {
158 {"vddio", 1800000, 1800000, 100000, 100},
159 },
160 },
161 .ops = {
162 .enable = dsi_28nm_phy_enable,
163 .disable = dsi_28nm_phy_disable,
164 },
165};
166
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 509376fdd112..5104fc9f9a53 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -72,31 +72,14 @@ long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw,
72int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw) 72int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw)
73{ 73{
74 struct msm_dsi_pll *pll = hw_clk_to_pll(hw); 74 struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
75 int ret;
76
77 /*
78 * Certain PLLs need to update the same VCO rate and registers
79 * after resume in suspend/resume scenario.
80 */
81 if (pll->restore_state) {
82 ret = pll->restore_state(pll);
83 if (ret)
84 goto error;
85 }
86 75
87 ret = dsi_pll_enable(pll); 76 return dsi_pll_enable(pll);
88
89error:
90 return ret;
91} 77}
92 78
93void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw) 79void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw)
94{ 80{
95 struct msm_dsi_pll *pll = hw_clk_to_pll(hw); 81 struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
96 82
97 if (pll->save_state)
98 pll->save_state(pll);
99
100 dsi_pll_disable(pll); 83 dsi_pll_disable(pll);
101} 84}
102 85
@@ -134,6 +117,29 @@ void msm_dsi_pll_destroy(struct msm_dsi_pll *pll)
134 pll->destroy(pll); 117 pll->destroy(pll);
135} 118}
136 119
120void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
121{
122 if (pll->save_state) {
123 pll->save_state(pll);
124 pll->state_saved = true;
125 }
126}
127
128int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
129{
130 int ret;
131
132 if (pll->restore_state && pll->state_saved) {
133 ret = pll->restore_state(pll);
134 if (ret)
135 return ret;
136
137 pll->state_saved = false;
138 }
139
140 return 0;
141}
142
137struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, 143struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
138 enum msm_dsi_phy_type type, int id) 144 enum msm_dsi_phy_type type, int id)
139{ 145{
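
The new helpers make saving and restoring an explicit, one-shot contract instead of a side effect of clk prepare/unprepare: a restore rewrites hardware only if a save actually preceded it, and each snapshot is consumed once. In sequence:

msm_dsi_pll_save_state(pll);		/* state_saved = true        */

ret = msm_dsi_pll_restore_state(pll);	/* rewrites HW, clears flag  */

ret = msm_dsi_pll_restore_state(pll);	/* nothing saved: no-op, 0   */
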
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 5a3bb241c039..063caa2c5740 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -27,6 +27,7 @@ struct msm_dsi_pll {
27 27
28 struct clk_hw clk_hw; 28 struct clk_hw clk_hw;
29 bool pll_on; 29 bool pll_on;
30 bool state_saved;
30 31
31 unsigned long min_rate; 32 unsigned long min_rate;
32 unsigned long max_rate; 33 unsigned long max_rate;
@@ -82,8 +83,16 @@ void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev,
82/* 83/*
83 * Initialization for Each PLL Type 84 * Initialization for Each PLL Type
84 */ 85 */
86#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
85struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev, 87struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
86 enum msm_dsi_phy_type type, int id); 88 enum msm_dsi_phy_type type, int id);
89#else
90static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init(
91 struct platform_device *pdev, enum msm_dsi_phy_type type, int id)
92{
93 return ERR_PTR(-ENODEV);
94}
95#endif
87 96
88#endif /* __DSI_PLL_H__ */ 97#endif /* __DSI_PLL_H__ */
89 98
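
The #else stub above is the usual kernel idiom for Kconfig-optional sub-drivers: callers compile against the same prototype whether or not CONFIG_DRM_MSM_DSI_28NM_PHY is set, and simply see -ENODEV at runtime. A caller sketch (this mirrors the stub's contract, not a function from this patch):

pll = msm_dsi_pll_28nm_init(pdev, type, id);
if (IS_ERR(pll)) {
	/* 28nm PLL driver configured out (or init failed) */
	dev_info(&pdev->dev, "no DSI PLL support: %ld\n", PTR_ERR(pll));
	pll = NULL;	/* callers treat a missing PLL as optional */
}
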
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index eb8ac3097ff5..1912cfcca48c 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -465,26 +465,21 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
465 void __iomem *base = pll_28nm->mmio; 465 void __iomem *base = pll_28nm->mmio;
466 int ret; 466 int ret;
467 467
468 if ((cached_state->vco_rate != 0) && 468 ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
469 (cached_state->vco_rate == __clk_get_rate(pll->clk_hw.clk))) { 469 cached_state->vco_rate, 0);
470 ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw, 470 if (ret) {
471 cached_state->vco_rate, 0); 471 dev_err(&pll_28nm->pdev->dev,
472 if (ret) { 472 "restore vco rate failed. ret=%d\n", ret);
473 dev_err(&pll_28nm->pdev->dev, 473 return ret;
474 "restore vco rate failed. ret=%d\n", ret);
475 return ret;
476 }
477
478 pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
479 cached_state->postdiv3);
480 pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
481 cached_state->postdiv1);
482 pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
483 cached_state->byte_mux);
484
485 cached_state->vco_rate = 0;
486 } 474 }
487 475
476 pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
477 cached_state->postdiv3);
478 pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
479 cached_state->postdiv1);
480 pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
481 cached_state->byte_mux);
482
488 return 0; 483 return 0;
489} 484}
490 485
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 26f268e2dd3d..06cbddfc914f 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23) 21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
22 22
23Copyright (C) 2013 by the following authors: 23Copyright (C) 2013-2015 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 24- Rob Clark <robdclark@gmail.com> (robclark)
25 25
26Permission is hereby granted, free of charge, to any person obtaining 26Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index f9c71dceb5e2..bef1d65fe28c 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23) 21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
22 22
23Copyright (C) 2013-2015 by the following authors: 23Copyright (C) 2013-2015 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 24- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 7991069dd492..81200e9be382 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -373,7 +373,7 @@ static int edp_gpio_config(struct edp_ctrl *ctrl)
373 struct device *dev = &ctrl->pdev->dev; 373 struct device *dev = &ctrl->pdev->dev;
374 int ret; 374 int ret;
375 375
376 ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd"); 376 ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd", GPIOD_IN);
377 if (IS_ERR(ctrl->panel_hpd_gpio)) { 377 if (IS_ERR(ctrl->panel_hpd_gpio)) {
378 ret = PTR_ERR(ctrl->panel_hpd_gpio); 378 ret = PTR_ERR(ctrl->panel_hpd_gpio);
379 ctrl->panel_hpd_gpio = NULL; 379 ctrl->panel_hpd_gpio = NULL;
@@ -381,13 +381,7 @@ static int edp_gpio_config(struct edp_ctrl *ctrl)
381 return ret; 381 return ret;
382 } 382 }
383 383
384 ret = gpiod_direction_input(ctrl->panel_hpd_gpio); 384 ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en", GPIOD_OUT_LOW);
385 if (ret) {
386 pr_err("%s: Set direction for hpd failed, %d\n", __func__, ret);
387 return ret;
388 }
389
390 ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en");
391 if (IS_ERR(ctrl->panel_en_gpio)) { 385 if (IS_ERR(ctrl->panel_en_gpio)) {
392 ret = PTR_ERR(ctrl->panel_en_gpio); 386 ret = PTR_ERR(ctrl->panel_en_gpio);
393 ctrl->panel_en_gpio = NULL; 387 ctrl->panel_en_gpio = NULL;
@@ -395,13 +389,6 @@ static int edp_gpio_config(struct edp_ctrl *ctrl)
395 return ret; 389 return ret;
396 } 390 }
397 391
398 ret = gpiod_direction_output(ctrl->panel_en_gpio, 0);
399 if (ret) {
400 pr_err("%s: Set direction for panel_en failed, %d\n",
401 __func__, ret);
402 return ret;
403 }
404
405 DBG("gpio on"); 392 DBG("gpio on");
406 393
407 return 0; 394 return 0;
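
The edp_ctrl.c hunks track the gpiod consumer API: the flags argument to devm_gpiod_get() became mandatory, so direction (and initial output level) are configured atomically at request time, and the separate gpiod_direction_input()/gpiod_direction_output() calls, with their error paths, disappear. The general shape, as a sketch (the helper name is invented; the con_ids are from the hunk):

#include <linux/gpio/consumer.h>

/* Sketch: request-and-configure in one step. */
static int example_request_panel_gpios(struct device *dev,
				       struct gpio_desc **hpd,
				       struct gpio_desc **en)
{
	*hpd = devm_gpiod_get(dev, "panel-hpd", GPIOD_IN);
	if (IS_ERR(*hpd))
		return PTR_ERR(*hpd);

	/* output, driven low until the panel is powered up */
	*en = devm_gpiod_get(dev, "panel-en", GPIOD_OUT_LOW);
	if (IS_ERR(*en))
		return PTR_ERR(*en);

	return 0;
}
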
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 814536202efe..101b324cdeef 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -22,7 +22,9 @@
22void hdmi_set_mode(struct hdmi *hdmi, bool power_on) 22void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
23{ 23{
24 uint32_t ctrl = 0; 24 uint32_t ctrl = 0;
25 unsigned long flags;
25 26
27 spin_lock_irqsave(&hdmi->reg_lock, flags);
26 if (power_on) { 28 if (power_on) {
27 ctrl |= HDMI_CTRL_ENABLE; 29 ctrl |= HDMI_CTRL_ENABLE;
28 if (!hdmi->hdmi_mode) { 30 if (!hdmi->hdmi_mode) {
@@ -37,6 +39,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
37 } 39 }
38 40
39 hdmi_write(hdmi, REG_HDMI_CTRL, ctrl); 41 hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
42 spin_unlock_irqrestore(&hdmi->reg_lock, flags);
40 DBG("HDMI Core: %s, HDMI_CTRL=0x%08x", 43 DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
41 power_on ? "Enable" : "Disable", ctrl); 44 power_on ? "Enable" : "Disable", ctrl);
42} 45}
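
hdmi_set_mode() now takes hdmi->reg_lock because, with HDCP wired into the irq handler and a workqueue, HDMI_CTRL can be written from several contexts; an irqsave spinlock keeps updates to the shared register word atomic against interrupts. The pattern, as a generic sketch (the helper name is invented; hdmi_read()/hdmi_write() are the driver's accessors):

#include <linux/spinlock.h>

/* Hypothetical helper: set bits in a register shared with irq context. */
static void example_hdmi_ctrl_set(struct hdmi *hdmi, u32 set_bits)
{
	unsigned long flags;
	u32 ctrl;

	spin_lock_irqsave(&hdmi->reg_lock, flags);
	ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
	hdmi_write(hdmi, REG_HDMI_CTRL, ctrl | set_bits);
	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
}
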
@@ -51,6 +54,10 @@ static irqreturn_t hdmi_irq(int irq, void *dev_id)
51 /* Process DDC: */ 54 /* Process DDC: */
52 hdmi_i2c_irq(hdmi->i2c); 55 hdmi_i2c_irq(hdmi->i2c);
53 56
57 /* Process HDCP: */
58 if (hdmi->hdcp_ctrl)
59 hdmi_hdcp_irq(hdmi->hdcp_ctrl);
60
54 /* TODO audio.. */ 61 /* TODO audio.. */
55 62
56 return IRQ_HANDLED; 63 return IRQ_HANDLED;
@@ -60,6 +67,15 @@ static void hdmi_destroy(struct hdmi *hdmi)
60{ 67{
61 struct hdmi_phy *phy = hdmi->phy; 68 struct hdmi_phy *phy = hdmi->phy;
62 69
70 /*
 71 * at this point, hpd has been disabled; after flushing the
 72 * workq, it is safe to deinit hdcp
73 */
74 if (hdmi->workq) {
75 flush_workqueue(hdmi->workq);
76 destroy_workqueue(hdmi->workq);
77 }
78 hdmi_hdcp_destroy(hdmi);
63 if (phy) 79 if (phy)
64 phy->funcs->destroy(phy); 80 phy->funcs->destroy(phy);
65 81
@@ -77,6 +93,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
77{ 93{
78 struct hdmi_platform_config *config = pdev->dev.platform_data; 94 struct hdmi_platform_config *config = pdev->dev.platform_data;
79 struct hdmi *hdmi = NULL; 95 struct hdmi *hdmi = NULL;
96 struct resource *res;
80 int i, ret; 97 int i, ret;
81 98
82 hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); 99 hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
@@ -87,18 +104,18 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
87 104
88 hdmi->pdev = pdev; 105 hdmi->pdev = pdev;
89 hdmi->config = config; 106 hdmi->config = config;
107 spin_lock_init(&hdmi->reg_lock);
90 108
91 /* not sure about which phy maps to which msm.. probably I miss some */ 109 /* not sure about which phy maps to which msm.. probably I miss some */
92 if (config->phy_init) 110 if (config->phy_init) {
93 hdmi->phy = config->phy_init(hdmi); 111 hdmi->phy = config->phy_init(hdmi);
94 else
95 hdmi->phy = ERR_PTR(-ENXIO);
96 112
97 if (IS_ERR(hdmi->phy)) { 113 if (IS_ERR(hdmi->phy)) {
98 ret = PTR_ERR(hdmi->phy); 114 ret = PTR_ERR(hdmi->phy);
99 dev_err(&pdev->dev, "failed to load phy: %d\n", ret); 115 dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
100 hdmi->phy = NULL; 116 hdmi->phy = NULL;
101 goto fail; 117 goto fail;
118 }
102 } 119 }
103 120
104 hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI"); 121 hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
@@ -107,6 +124,18 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
 		goto fail;
 	}
 
+	/* HDCP needs physical address of hdmi register */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+		config->mmio_name);
+	hdmi->mmio_phy_addr = res->start;
+
+	hdmi->qfprom_mmio = msm_ioremap(pdev,
+		config->qfprom_mmio_name, "HDMI_QFPROM");
+	if (IS_ERR(hdmi->qfprom_mmio)) {
+		dev_info(&pdev->dev, "can't find qfprom resource\n");
+		hdmi->qfprom_mmio = NULL;
+	}
+
 	hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) *
 			config->hpd_reg_cnt, GFP_KERNEL);
 	if (!hdmi->hpd_regs) {
@@ -189,6 +218,8 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
 		hdmi->pwr_clks[i] = clk;
 	}
 
+	hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
+
 	hdmi->i2c = hdmi_i2c_init(hdmi);
 	if (IS_ERR(hdmi->i2c)) {
 		ret = PTR_ERR(hdmi->i2c);
@@ -197,6 +228,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
 		goto fail;
 	}
 
+	hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi);
+	if (IS_ERR(hdmi->hdcp_ctrl)) {
+		dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
+		hdmi->hdcp_ctrl = NULL;
+	}
+
 	return hdmi;
 
 fail:
@@ -310,7 +347,7 @@ static const char *pwr_clk_names_8x74[] = {"extp_clk", "alt_iface_clk"};
 static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_clk"};
 static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
 
-static struct hdmi_platform_config hdmi_tx_8074_config = {
+static struct hdmi_platform_config hdmi_tx_8974_config = {
 		.phy_init = hdmi_phy_8x74_init,
 		HDMI_CFG(pwr_reg, 8x74),
 		HDMI_CFG(hpd_reg, 8x74),
@@ -330,9 +367,21 @@ static struct hdmi_platform_config hdmi_tx_8084_config = {
 		.hpd_freq = hpd_clk_freq_8x74,
 };
 
+static const char *hpd_reg_names_8x94[] = {};
+
+static struct hdmi_platform_config hdmi_tx_8994_config = {
+		.phy_init = NULL, /* nothing to do for this HDMI PHY 20nm */
+		HDMI_CFG(pwr_reg, 8x74),
+		HDMI_CFG(hpd_reg, 8x94),
+		HDMI_CFG(pwr_clk, 8x74),
+		HDMI_CFG(hpd_clk, 8x74),
+		.hpd_freq = hpd_clk_freq_8x74,
+};
+
 static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
 	{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
-	{ .compatible = "qcom,hdmi-tx-8074", .data = &hdmi_tx_8074_config },
+	{ .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
 	{ .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
 	{ .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
 	{}
@@ -347,8 +396,7 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
 		snprintf(name2, sizeof(name2), "%s-gpio", name);
 		gpio = of_get_named_gpio(of_node, name2, 0);
 		if (gpio < 0) {
-			dev_err(dev, "failed to get gpio: %s (%d)\n",
-				name, gpio);
+			DBG("failed to get gpio: %s (%d)", name, gpio);
 			gpio = -1;
 		}
 	}
@@ -376,6 +424,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 	}
 
 	hdmi_cfg->mmio_name = "core_physical";
+	hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
 	hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
 	hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
 	hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
@@ -391,7 +440,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 	if (cpu_is_apq8064()) {
 		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
 		config.phy_init = hdmi_phy_8960_init;
-		config.mmio_name = "hdmi_msm_hdmi_addr";
 		config.hpd_reg_names = hpd_reg_names;
 		config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
 		config.hpd_clk_names = hpd_clk_names;
@@ -404,7 +452,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 	} else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
 		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
 		config.phy_init = hdmi_phy_8960_init;
-		config.mmio_name = "hdmi_msm_hdmi_addr";
 		config.hpd_reg_names = hpd_reg_names;
 		config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
 		config.hpd_clk_names = hpd_clk_names;
@@ -419,7 +466,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
419 "8901_hdmi_mvs", "8901_mpp0" 466 "8901_hdmi_mvs", "8901_mpp0"
420 }; 467 };
421 config.phy_init = hdmi_phy_8x60_init; 468 config.phy_init = hdmi_phy_8x60_init;
422 config.mmio_name = "hdmi_msm_hdmi_addr";
423 config.hpd_reg_names = hpd_reg_names; 469 config.hpd_reg_names = hpd_reg_names;
424 config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); 470 config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
425 config.hpd_clk_names = hpd_clk_names; 471 config.hpd_clk_names = hpd_clk_names;
@@ -430,6 +476,9 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 		config.mux_en_gpio = -1;
 		config.mux_sel_gpio = -1;
 	}
+	config.mmio_name = "hdmi_msm_hdmi_addr";
+	config.qfprom_mmio_name = "hdmi_msm_qfprom_addr";
+
 	hdmi_cfg = &config;
 #endif
 	dev->platform_data = hdmi_cfg;
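
Note on the locking these hunks introduce: hdmi_set_mode() (and, further down, hpd_enable()) now bracket their read-modify-write of registers shared with the HDCP interrupt path using the new hdmi->reg_lock. A minimal kernel-style sketch of the pattern, assuming the struct hdmi from hdmi.h below; hdmi_rmw() is a hypothetical helper for illustration and is not part of this patch:

    /* Hypothetical: irqsave-protected read-modify-write of a shared
     * HDMI register (e.g. REG_HDMI_CTRL or REG_HDMI_HPD_CTRL). */
    static void hdmi_rmw(struct hdmi *hdmi, u32 reg, u32 clear, u32 set)
    {
    	unsigned long flags;
    	u32 val;
    
    	spin_lock_irqsave(&hdmi->reg_lock, flags);
    	val = hdmi_read(hdmi, reg);
    	val = (val & ~clear) | set;
    	hdmi_write(hdmi, reg, val);
    	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
    }

The irqsave variant is needed because hdmi_hdcp_irq() takes the same lock from interrupt context.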
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 68fdfb3622a5..d0e663192d01 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -37,6 +37,8 @@ struct hdmi_audio {
 	int rate;
 };
 
+struct hdmi_hdcp_ctrl;
+
 struct hdmi {
 	struct drm_device *dev;
 	struct platform_device *pdev;
@@ -51,6 +53,8 @@ struct hdmi {
 	unsigned long int pixclock;
 
 	void __iomem *mmio;
+	void __iomem *qfprom_mmio;
+	phys_addr_t mmio_phy_addr;
 
 	struct regulator **hpd_regs;
 	struct regulator **pwr_regs;
@@ -68,12 +72,25 @@ struct hdmi {
 	bool hdmi_mode;               /* are we in hdmi mode? */
 
 	int irq;
+	struct workqueue_struct *workq;
+
+	struct hdmi_hdcp_ctrl *hdcp_ctrl;
+
+	/*
+	 * spinlock to protect registers shared by different execution
+	 * REG_HDMI_CTRL
+	 * REG_HDMI_DDC_ARBITRATION
+	 * REG_HDMI_HDCP_INT_CTRL
+	 * REG_HDMI_HPD_CTRL
+	 */
+	spinlock_t reg_lock;
 };
 
 /* platform config data (ie. from DT, or pdata) */
 struct hdmi_platform_config {
 	struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
 	const char *mmio_name;
+	const char *qfprom_mmio_name;
 
 	/* regulators that need to be on for hpd: */
 	const char **hpd_reg_names;
@@ -109,6 +126,11 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
 	return msm_readl(hdmi->mmio + reg);
 }
 
+static inline u32 hdmi_qfprom_read(struct hdmi *hdmi, u32 reg)
+{
+	return msm_readl(hdmi->qfprom_mmio + reg);
+}
+
 /*
  * The phy appears to be different, for example between 8960 and 8x60,
  * so split the phy related functions out and load the correct one at
@@ -117,7 +139,6 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
 
 struct hdmi_phy_funcs {
 	void (*destroy)(struct hdmi_phy *phy);
-	void (*reset)(struct hdmi_phy *phy);
 	void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
 	void (*powerdown)(struct hdmi_phy *phy);
 };
@@ -163,4 +184,13 @@ void hdmi_i2c_irq(struct i2c_adapter *i2c);
 void hdmi_i2c_destroy(struct i2c_adapter *i2c);
 struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
 
+/*
+ * hdcp
+ */
+struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi);
+void hdmi_hdcp_destroy(struct hdmi *hdmi);
+void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+
 #endif /* __HDMI_CONNECTOR_H__ */
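
The new hdmi_qfprom_read() above is a thin accessor over the QFPROM fuse mapping. A sketch of how a caller might pull the fused AKSV words out (offsets taken from hdmi_hdcp.c later in this patch); note that hdmi_init() leaves qfprom_mmio NULL when the resource is absent, so a real caller must check that first:

    /* Sketch only: read the 40-bit AKSV the HDCP code validates later. */
    static void example_read_aksv(struct hdmi *hdmi, u32 *lsb, u32 *msb)
    {
    	*lsb = hdmi_qfprom_read(hdmi, 0x000060D8);	/* HDCP_KSV_LSB */
    	*msb = hdmi_qfprom_read(hdmi, 0x000060DC);	/* HDCP_KSV_MSB */
    }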
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index e6f034808371..0b1b5586ff35 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -441,6 +441,12 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
 
 #define REG_HDMI_HDCP_SW_LOWER_AKSV				0x00000288
 
+#define REG_HDMI_CEC_CTRL					0x0000028c
+
+#define REG_HDMI_CEC_WR_DATA					0x00000290
+
+#define REG_HDMI_CEC_CEC_RETRANSMIT				0x00000294
+
 #define REG_HDMI_CEC_STATUS					0x00000298
 
 #define REG_HDMI_CEC_INT					0x0000029c
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 872485f60134..df232e20c13e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -203,7 +203,6 @@ int hdmi_audio_update(struct hdmi *hdmi)
 		audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
 		audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
 	} else {
-		hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
 		acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_CONT;
 		acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SEND;
 		vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index a7a1d8267cf0..92b69ae8caf9 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -100,8 +100,13 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
 		hdmi_audio_update(hdmi);
 	}
 
-	phy->funcs->powerup(phy, hdmi->pixclock);
+	if (phy)
+		phy->funcs->powerup(phy, hdmi->pixclock);
+
 	hdmi_set_mode(hdmi, true);
+
+	if (hdmi->hdcp_ctrl)
+		hdmi_hdcp_on(hdmi->hdcp_ctrl);
 }
 
 static void hdmi_bridge_enable(struct drm_bridge *bridge)
@@ -118,9 +123,14 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
 	struct hdmi *hdmi = hdmi_bridge->hdmi;
 	struct hdmi_phy *phy = hdmi->phy;
 
+	if (hdmi->hdcp_ctrl)
+		hdmi_hdcp_off(hdmi->hdcp_ctrl);
+
 	DBG("power down");
 	hdmi_set_mode(hdmi, false);
-	phy->funcs->powerdown(phy);
+
+	if (phy)
+		phy->funcs->powerdown(phy);
 
 	if (hdmi->power_on) {
 		power_off(bridge);
@@ -142,8 +152,6 @@ static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
 
 	hdmi->pixclock = mode->clock * 1000;
 
-	hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1;
-
 	hstart = mode->htotal - mode->hsync_start;
 	hend   = mode->htotal - mode->hsync_start + mode->hdisplay;
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 54aa93ff5473..a3b05ae52dae 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -28,6 +28,55 @@ struct hdmi_connector {
 };
 #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
 
+static void hdmi_phy_reset(struct hdmi *hdmi)
+{
+	unsigned int val;
+
+	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	} else {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	}
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+	} else {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+	}
+
+	msleep(100);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	} else {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	}
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+	} else {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+	}
+}
+
 static int gpio_config(struct hdmi *hdmi, bool on)
 {
 	struct device *dev = &hdmi->pdev->dev;
@@ -35,21 +84,25 @@ static int gpio_config(struct hdmi *hdmi, bool on)
 	int ret;
 
 	if (on) {
-		ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
-		if (ret) {
-			dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
-				"HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
-			goto error1;
+		if (config->ddc_clk_gpio != -1) {
+			ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
+			if (ret) {
+				dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
+				goto error1;
+			}
+			gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
 		}
-		gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
 
-		ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
-		if (ret) {
-			dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
-				"HDMI_DDC_DATA", config->ddc_data_gpio, ret);
-			goto error2;
+		if (config->ddc_data_gpio != -1) {
+			ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
+			if (ret) {
+				dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_DDC_DATA", config->ddc_data_gpio, ret);
+				goto error2;
+			}
+			gpio_set_value_cansleep(config->ddc_data_gpio, 1);
 		}
-		gpio_set_value_cansleep(config->ddc_data_gpio, 1);
 
 		ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
 		if (ret) {
@@ -94,8 +147,12 @@ static int gpio_config(struct hdmi *hdmi, bool on)
 		}
 		DBG("gpio on");
 	} else {
-		gpio_free(config->ddc_clk_gpio);
-		gpio_free(config->ddc_data_gpio);
+		if (config->ddc_clk_gpio != -1)
+			gpio_free(config->ddc_clk_gpio);
+
+		if (config->ddc_data_gpio != -1)
+			gpio_free(config->ddc_data_gpio);
+
 		gpio_free(config->hpd_gpio);
 
 		if (config->mux_en_gpio != -1) {
@@ -126,9 +183,11 @@ error5:
 error4:
 	gpio_free(config->hpd_gpio);
 error3:
-	gpio_free(config->ddc_data_gpio);
+	if (config->ddc_data_gpio != -1)
+		gpio_free(config->ddc_data_gpio);
 error2:
-	gpio_free(config->ddc_clk_gpio);
+	if (config->ddc_clk_gpio != -1)
+		gpio_free(config->ddc_clk_gpio);
 error1:
 	return ret;
 }
@@ -138,9 +197,9 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
 	struct hdmi *hdmi = hdmi_connector->hdmi;
 	const struct hdmi_platform_config *config = hdmi->config;
 	struct device *dev = &hdmi->pdev->dev;
-	struct hdmi_phy *phy = hdmi->phy;
 	uint32_t hpd_ctrl;
 	int i, ret;
+	unsigned long flags;
 
 	for (i = 0; i < config->hpd_reg_cnt; i++) {
 		ret = regulator_enable(hdmi->hpd_regs[i]);
@@ -181,7 +240,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
 	}
 
 	hdmi_set_mode(hdmi, false);
-	phy->funcs->reset(phy);
+	hdmi_phy_reset(hdmi);
 	hdmi_set_mode(hdmi, true);
 
 	hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
@@ -192,6 +251,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
 			HDMI_HPD_INT_CTRL_INT_EN);
 
 	/* set timeout to 4.1ms (max) for hardware debounce */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
 	hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
 	hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
 
@@ -200,6 +260,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
 			~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
 	hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
 			HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
 
 	return 0;
 
@@ -250,7 +311,6 @@ hotplug_work(struct work_struct *work)
 void hdmi_connector_irq(struct drm_connector *connector)
 {
 	struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
-	struct msm_drm_private *priv = connector->dev->dev_private;
 	struct hdmi *hdmi = hdmi_connector->hdmi;
 	uint32_t hpd_int_status, hpd_int_ctrl;
 
@@ -274,7 +334,7 @@ void hdmi_connector_irq(struct drm_connector *connector)
 			hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
 		hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
 
-		queue_work(priv->wq, &hdmi_connector->hpd_work);
+		queue_work(hdmi->workq, &hdmi_connector->hpd_work);
 	}
 }
 
@@ -350,6 +410,7 @@ static int hdmi_connector_get_modes(struct drm_connector *connector)
 
 	hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
 
+	hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
 	drm_mode_connector_update_edid_property(connector, edid);
 
 	if (edid) {
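
With the per-phy reset() hook removed, hpd_enable() now calls the open-coded hdmi_phy_reset() added at the top of this file. Its polarity handling can be condensed as follows; assert_reset() is a hypothetical illustration, not driver code, where "active_low" mirrors the meaning of the HDMI_PHY_CTRL_SW_RESET_LOW flag:

    #include <stdint.h>
    
    /* Asserting an active-low reset means driving the bit to 0;
     * for an active-high reset it means driving the bit to 1. */
    static uint32_t assert_reset(uint32_t val, uint32_t bit,
    			     int active_low, int assert)
    {
    	int drive_low = active_low ? assert : !assert;
    
    	return drive_low ? (val & ~bit) : (val | bit);
    }

hdmi_phy_reset() then amounts to: assert both SW_RESET and SW_RESET_PLL, hold for 100 ms, and deassert them.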
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
new file mode 100644
index 000000000000..1dc9c34eb0df
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -0,0 +1,1437 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "hdmi.h"
+#include <linux/qcom_scm.h>
+
+#define HDCP_REG_ENABLE 0x01
+#define HDCP_REG_DISABLE 0x00
+#define HDCP_PORT_ADDR 0x74
+
+#define HDCP_INT_STATUS_MASK ( \
+		HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT | \
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT | \
+		HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT | \
+		HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT)
+
+#define AUTH_WORK_RETRIES_TIME 100
+#define AUTH_RETRIES_TIME 30
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB  0x000000F8
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB  0x000000FC
+#define HDCP_KSV_LSB                     0x000060D8
+#define HDCP_KSV_MSB                     0x000060DC
+
+enum DS_TYPE {  /* type of downstream device */
+	DS_UNKNOWN,
+	DS_RECEIVER,
+	DS_REPEATER,
+};
+
+enum hdmi_hdcp_state {
+	HDCP_STATE_NO_AKSV,
+	HDCP_STATE_INACTIVE,
+	HDCP_STATE_AUTHENTICATING,
+	HDCP_STATE_AUTHENTICATED,
+	HDCP_STATE_AUTH_FAILED
+};
+
+struct hdmi_hdcp_reg_data {
+	u32 reg_id;
+	u32 off;
+	char *name;
+	u32 reg_val;
+};
+
+struct hdmi_hdcp_ctrl {
+	struct hdmi *hdmi;
+	u32 auth_retries;
+	bool tz_hdcp;
+	enum hdmi_hdcp_state hdcp_state;
+	struct work_struct hdcp_auth_work;
+	struct work_struct hdcp_reauth_work;
+
+#define AUTH_ABORT_EV 1
+#define AUTH_RESULT_RDY_EV 2
+	unsigned long auth_event;
+	wait_queue_head_t auth_event_queue;
+
+	u32 ksv_fifo_w_index;
+	/*
+	 * store aksv from qfprom
+	 */
+	u32 aksv_lsb;
+	u32 aksv_msb;
+	bool aksv_valid;
+	u32 ds_type;
+	u32 bksv_lsb;
+	u32 bksv_msb;
+	u8 dev_count;
+	u8 depth;
+	u8 ksv_list[5 * 127];
+	bool max_cascade_exceeded;
+	bool max_dev_exceeded;
+};
+
+static int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
+	u8 *data, u16 data_len)
+{
+	int rc;
+	int retry = 5;
+	struct i2c_msg msgs[] = {
+		{
+			.addr = addr >> 1,
+			.flags = 0,
+			.len = 1,
+			.buf = &offset,
+		}, {
+			.addr = addr >> 1,
+			.flags = I2C_M_RD,
+			.len = data_len,
+			.buf = data,
+		}
+	};
+
+	DBG("Start DDC read");
+retry:
+	rc = i2c_transfer(hdmi->i2c, msgs, 2);
+
+	retry--;
+	if (rc == 2)
+		rc = 0;
+	else if (retry > 0)
+		goto retry;
+	else
+		rc = -EIO;
+
+	DBG("End DDC read %d", rc);
+
+	return rc;
+}
+
+#define HDCP_DDC_WRITE_MAX_BYTE_NUM 32
+
+static int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
+	u8 *data, u16 data_len)
+{
+	int rc;
+	int retry = 10;
+	u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM];
+	struct i2c_msg msgs[] = {
+		{
+			.addr = addr >> 1,
+			.flags = 0,
+			.len = 1,
+		}
+	};
+
+	DBG("Start DDC write");
+	if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) {
+		pr_err("%s: write size too big\n", __func__);
+		return -ERANGE;
+	}
+
+	buf[0] = offset;
+	memcpy(&buf[1], data, data_len);
+	msgs[0].buf = buf;
+	msgs[0].len = data_len + 1;
+retry:
+	rc = i2c_transfer(hdmi->i2c, msgs, 1);
+
+	retry--;
+	if (rc == 1)
+		rc = 0;
+	else if (retry > 0)
+		goto retry;
+	else
+		rc = -EIO;
+
+	DBG("End DDC write %d", rc);
+
+	return rc;
+}
+
+static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
+	u32 *pdata, u32 count)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	struct qcom_scm_hdcp_req scm_buf[QCOM_SCM_HDCP_MAX_REQ_CNT];
+	u32 resp, phy_addr, idx = 0;
+	int i, ret = 0;
+
+	WARN_ON(!pdata || !preg || (count == 0));
+
+	if (hdcp_ctrl->tz_hdcp) {
+		phy_addr = (u32)hdmi->mmio_phy_addr;
+
+		while (count) {
+			memset(scm_buf, 0, sizeof(scm_buf));
+			for (i = 0; i < count && i < QCOM_SCM_HDCP_MAX_REQ_CNT;
+				i++) {
+				scm_buf[i].addr = phy_addr + preg[idx];
+				scm_buf[i].val  = pdata[idx];
+				idx++;
+			}
+			ret = qcom_scm_hdcp_req(scm_buf, i, &resp);
+
+			if (ret || resp) {
+				pr_err("%s: error: scm_call ret=%d resp=%u\n",
+					__func__, ret, resp);
+				ret = -EINVAL;
+				break;
+			}
+
+			count -= i;
+		}
+	} else {
+		for (i = 0; i < count; i++)
+			hdmi_write(hdmi, preg[i], pdata[i]);
+	}
+
+	return ret;
+}
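
When HDCP is owned by TrustZone (tz_hdcp), hdmi_hdcp_scm_wr() above packs register writes into scm_buf in chunks of at most QCOM_SCM_HDCP_MAX_REQ_CNT per SCM call. A standalone model of just the chunking arithmetic (all names here are illustrative, not driver code):

    #include <stdio.h>
    
    #define MAX_REQ 5	/* stands in for QCOM_SCM_HDCP_MAX_REQ_CNT */
    
    static void submit_batch(const int *vals, int n)
    {
    	printf("batch of %d\n", n);
    }
    
    int main(void)
    {
    	int vals[12] = {0};
    	int count = 12, idx = 0;
    
    	while (count) {
    		int i;
    
    		for (i = 0; i < count && i < MAX_REQ; i++)
    			idx++;		/* consume one queued write */
    		submit_batch(&vals[idx - i], i);
    		count -= i;
    	}
    	return 0;	/* prints: batch of 5, 5, 2 */
    }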
+
+void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val, hdcp_int_status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_INT_CTRL);
+	hdcp_int_status = reg_val & HDCP_INT_STATUS_MASK;
+	if (!hdcp_int_status) {
+		spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+		return;
+	}
+	/* Clear Interrupts */
+	reg_val |= hdcp_int_status << 1;
+	/* Clear AUTH_FAIL_INFO as well */
+	if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT)
+		reg_val |= HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK;
+	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	DBG("hdcp irq %x", hdcp_int_status);
+
+	if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT) {
+		pr_info("%s:AUTH_SUCCESS_INT received\n", __func__);
+		if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state) {
+			set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
+			wake_up_all(&hdcp_ctrl->auth_event_queue);
+		}
+	}
+
+	if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT) {
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+		pr_info("%s: AUTH_FAIL_INT rcvd, LINK0_STATUS=0x%08x\n",
+			__func__, reg_val);
+		if (HDCP_STATE_AUTHENTICATED == hdcp_ctrl->hdcp_state)
+			queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
+		else if (HDCP_STATE_AUTHENTICATING ==
+				hdcp_ctrl->hdcp_state) {
+			set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
+			wake_up_all(&hdcp_ctrl->auth_event_queue);
+		}
+	}
+}
+
+static int hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev)
+{
+	int rc;
+
+	rc = wait_event_timeout(hdcp_ctrl->auth_event_queue,
+		!!test_bit(ev, &hdcp_ctrl->auth_event),
+		msecs_to_jiffies(ms));
+	if (rc) {
+		pr_info("%s: msleep is canceled by event %d\n",
+				__func__, ev);
+		clear_bit(ev, &hdcp_ctrl->auth_event);
+		return -ECANCELED;
+	}
+
+	return 0;
+}
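
hdmi_hdcp_msleep() turns a plain delay into one an event can cut short: it returns 0 on a full timeout and -ECANCELED when the named event fired first. The producer side is presumably the teardown path (hdmi_hdcp_off(), not shown in this excerpt); a sketch of what such a producer would look like, under that assumption:

    /* Hypothetical: abort any delay pending in the auth work. */
    static void hdmi_hdcp_abort_sleep(struct hdmi_hdcp_ctrl *hdcp_ctrl)
    {
    	set_bit(AUTH_ABORT_EV, &hdcp_ctrl->auth_event);
    	wake_up_all(&hdcp_ctrl->auth_event_queue);
    }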
+
+static int hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+
+	/* Fetch aksv from QFPROM, this info should be public. */
+	hdcp_ctrl->aksv_lsb = hdmi_qfprom_read(hdmi, HDCP_KSV_LSB);
+	hdcp_ctrl->aksv_msb = hdmi_qfprom_read(hdmi, HDCP_KSV_MSB);
+
+	/* check there are 20 ones in AKSV */
+	if ((hweight32(hdcp_ctrl->aksv_lsb) + hweight32(hdcp_ctrl->aksv_msb))
+			!= 20) {
+		pr_err("%s: AKSV QFPROM doesn't have 20 1's, 20 0's\n",
+			__func__);
+		pr_err("%s: QFPROM AKSV chk failed (AKSV=%02x%08x)\n",
+			__func__, hdcp_ctrl->aksv_msb,
+			hdcp_ctrl->aksv_lsb);
+		return -EINVAL;
+	}
+	DBG("AKSV=%02x%08x", hdcp_ctrl->aksv_msb, hdcp_ctrl->aksv_lsb);
+
+	return 0;
+}
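
The twenty-ones rule enforced above comes from HDCP 1.x: a valid 40-bit KSV contains exactly 20 set bits and 20 clear bits. The same check in standalone C, with __builtin_popcount standing in for the kernel's hweight32():

    #include <stdint.h>
    #include <stdio.h>
    
    static int ksv_is_valid(uint32_t lsb, uint32_t msb)
    {
    	return __builtin_popcount(lsb) + __builtin_popcount(msb) == 20;
    }
    
    int main(void)
    {
    	printf("%d\n", ksv_is_valid(0x55555555, 0x00));	/* 16 ones: 0 */
    	printf("%d\n", ksv_is_valid(0x0fffff00, 0x00));	/* 20 ones: 1 */
    	return 0;
    }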
+
+static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val, failure, nack0;
+	int rc = 0;
+
+	/* Check for any DDC transfer failures */
+	reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+	failure = reg_val & HDMI_HDCP_DDC_STATUS_FAILED;
+	nack0 = reg_val & HDMI_HDCP_DDC_STATUS_NACK0;
+	DBG("HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d",
+		reg_val, failure, nack0);
+
+	if (failure) {
+		/*
+		 * Indicates that the last HDCP HW DDC transfer failed.
+		 * This occurs when a transfer is attempted with HDCP DDC
+		 * disabled (HDCP_DDC_DISABLE=1) or the number of retries
+		 * matches HDCP_DDC_RETRY_CNT.
+		 * Failure occurred, let's clear it.
+		 */
+		DBG("DDC failure detected");
+
+		/* First, Disable DDC */
+		hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0,
+			HDMI_HDCP_DDC_CTRL_0_DISABLE);
+
+		/* ACK the Failure to Clear it */
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_CTRL_1);
+		reg_val |= HDMI_HDCP_DDC_CTRL_1_FAILED_ACK;
+		hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_1, reg_val);
+
+		/* Check if the FAILURE got Cleared */
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+		if (reg_val & HDMI_HDCP_DDC_STATUS_FAILED)
+			pr_info("%s: Unable to clear HDCP DDC Failure\n",
+				__func__);
+
+		/* Re-Enable HDCP DDC */
+		hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0, 0);
+	}
+
+	if (nack0) {
+		DBG("Before: HDMI_DDC_SW_STATUS=0x%08x",
+			hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
+		/* Reset HDMI DDC software status */
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val |= HDMI_DDC_CTRL_SW_STATUS_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val &= ~HDMI_DDC_CTRL_SW_STATUS_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+		/* Reset HDMI DDC Controller */
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val |= HDMI_DDC_CTRL_SOFT_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+		/* If previous msleep is aborted, skip this msleep */
+		if (!rc)
+			rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val &= ~HDMI_DDC_CTRL_SOFT_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+		DBG("After: HDMI_DDC_SW_STATUS=0x%08x",
+			hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
+	}
+
+	return rc;
+}
+
+static int hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	u32 hdcp_ddc_status, ddc_hw_status;
+	u32 xfer_done, xfer_req, hw_done;
+	bool hw_not_ready;
+	u32 timeout_count;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+
+	if (hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS) == 0)
+		return 0;
+
+	/* Wait to be clean on DDC HW engine */
+	timeout_count = 100;
+	do {
+		hdcp_ddc_status = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+		ddc_hw_status = hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS);
+
+		xfer_done = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_DONE;
+		xfer_req = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_REQ;
+		hw_done = ddc_hw_status & HDMI_DDC_HW_STATUS_DONE;
+		hw_not_ready = !xfer_done || xfer_req || !hw_done;
+
+		if (hw_not_ready)
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_warn("%s: hw_ddc_clean failed\n", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
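
This is the first of many bounded polling loops in this file, all with the same shape: test a condition, decrement a retry budget, then take a cancellable sleep between attempts. The skeleton reduced to standalone C (poll_until() and its callbacks are illustrative only; sleep_cancellable() stands in for hdmi_hdcp_msleep()):

    #include <stdio.h>
    
    static int sleep_cancellable(void) { return 0; }	/* 0 = not cancelled */
    
    static int poll_until(int (*done)(void), int tries)
    {
    	while (!done()) {
    		if (--tries == 0)
    			return -1;	/* -ETIMEDOUT in the driver */
    		if (sleep_cancellable())
    			return -2;	/* abort event consumed */
    	}
    	return 0;
    }
    
    static int ready_now(void) { return 1; }
    
    int main(void)
    {
    	printf("%d\n", poll_until(ready_now, 100));	/* prints 0 */
    	return 0;
    }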
+
+static void hdmi_hdcp_reauth_work(struct work_struct *work)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
+		struct hdmi_hdcp_ctrl, hdcp_reauth_work);
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	unsigned long flags;
+	u32 reg_val;
+
+	DBG("HDCP REAUTH WORK");
+	/*
+	 * Disable HPD circuitry.
+	 * This is needed to reset the HDCP cipher engine so that when we
+	 * attempt a re-authentication, HW would clear the AN0_READY and
+	 * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register
+	 */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	reg_val &= ~HDMI_HPD_CTRL_ENABLE;
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+
+	/* Disable HDCP interrupts */
+	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
+		HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
+
+	/* Wait to be clean on DDC HW engine */
+	if (hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) {
+		pr_info("%s: reauth work aborted\n", __func__);
+		return;
+	}
+
+	/* Disable encryption and disable the HDCP block */
+	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);
+
+	/* Enable HPD circuitry */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	reg_val |= HDMI_HPD_CTRL_ENABLE;
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/*
+	 * Only retry defined times then abort current authenticating process
+	 */
+	if (++hdcp_ctrl->auth_retries == AUTH_RETRIES_TIME) {
+		hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+		hdcp_ctrl->auth_retries = 0;
+		pr_info("%s: abort reauthentication!\n", __func__);
+
+		return;
+	}
+
+	DBG("Queue AUTH WORK");
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
+}
+
+static int hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status;
+	u32 reg_val;
+	unsigned long flags;
+	int rc;
+
+	if (!hdcp_ctrl->aksv_valid) {
+		rc = hdmi_hdcp_read_validate_aksv(hdcp_ctrl);
+		if (rc) {
+			pr_err("%s: ASKV validation failed\n", __func__);
+			hdcp_ctrl->hdcp_state = HDCP_STATE_NO_AKSV;
+			return -ENOTSUPP;
+		}
+		hdcp_ctrl->aksv_valid = true;
+	}
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	/* disable HDMI Encrypt */
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val &= ~HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+	/* Enabling Software DDC */
+	reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
+	reg_val &= ~HDMI_DDC_ARBITRATION_HW_ARBITRATION;
+	hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/*
+	 * Write AKSV read from QFPROM to the HDCP registers.
+	 * This step is needed for HDCP authentication and must be
+	 * written before enabling HDCP.
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_SW_LOWER_AKSV, hdcp_ctrl->aksv_lsb);
+	hdmi_write(hdmi, REG_HDMI_HDCP_SW_UPPER_AKSV, hdcp_ctrl->aksv_msb);
+
+	/*
+	 * HDCP setup prior to enabling HDCP_CTRL.
+	 * Setup seed values for random number An.
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL0, 0xB1FFB0FF);
+	hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL1, 0xF00DFACE);
+
+	/* Disable the RngCipher state */
+	reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL);
+	reg_val &= ~HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER;
+	hdmi_write(hdmi, REG_HDMI_HDCP_DEBUG_CTRL, reg_val);
+	DBG("HDCP_DEBUG_CTRL=0x%08x",
+		hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL));
+
+	/*
+	 * Ensure that all register writes are completed before
+	 * enabling HDCP cipher
+	 */
+	wmb();
+
+	/*
+	 * Enable HDCP
+	 * This needs to be done as early as possible in order for the
+	 * hardware to make An available to read
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, HDMI_HDCP_CTRL_ENABLE);
+
+	/*
+	 * If we had stale values for the An ready bit, it should most
+	 * likely be cleared now after enabling HDCP cipher
+	 */
+	link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+	DBG("After enabling HDCP Link0_Status=0x%08x", link0_status);
+	if (!(link0_status &
+		(HDMI_HDCP_LINK0_STATUS_AN_0_READY |
+		HDMI_HDCP_LINK0_STATUS_AN_1_READY)))
+		DBG("An not ready after enabling HDCP");
+
+	/* Clear any DDC failures from previous tries before enable HDCP*/
+	rc = reset_hdcp_ddc_failures(hdcp_ctrl);
+
+	return rc;
+}
+
+static void hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val;
+	unsigned long flags;
+
+	DBG("hdcp auth failed, queue reauth work");
+	/* clear HDMI Encrypt */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val &= ~HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAILED;
+	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
+}
+
+static void hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val;
+	unsigned long flags;
+
+	/*
+	 * Disable software DDC before going into part3 to make sure
+	 * there is no Arbitration between software and hardware for DDC
+	 */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
+	reg_val |= HDMI_DDC_ARBITRATION_HW_ARBITRATION;
+	hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/* enable HDMI Encrypt */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val |= HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATED;
+	hdcp_ctrl->auth_retries = 0;
+}
+
+/*
+ * hdcp authenticating part 1
+ * Wait Key/An ready
+ * Read BCAPS from sink
+ * Write BCAPS and AKSV into HDCP engine
+ * Write An and AKSV to sink
+ * Read BKSV from sink and write into HDCP engine
+ */
+static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status, keys_state;
+	u32 timeout_count;
+	bool an_ready;
+
+	/* Wait for HDCP keys to be checked and validated */
+	timeout_count = 100;
+	do {
+		link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+		keys_state = (link0_status >> 28) & 0x7;
+		if (keys_state == HDCP_KEYS_STATE_VALID)
+			break;
+
+		DBG("Keys not ready(%d). s=%d, l0=%0x08x",
+			timeout_count, keys_state, link0_status);
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Wait key state timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	timeout_count = 100;
+	do {
+		link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+		an_ready = (link0_status & HDMI_HDCP_LINK0_STATUS_AN_0_READY)
+			&& (link0_status & HDMI_HDCP_LINK0_STATUS_AN_1_READY);
+		if (an_ready)
+			break;
+
+		DBG("An not ready(%d). l0_status=0x%08x",
+			timeout_count, link0_status);
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Wait An timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
+
+static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_aksv_0, link0_aksv_1;
+	u32 link0_an[2];
+	u8 aksv[5];
+
+	/* Read An0 and An1 */
+	link0_an[0] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA5);
+	link0_an[1] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA6);
+
+	/* Read AKSV */
+	link0_aksv_0 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA3);
+	link0_aksv_1 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4);
+
+	DBG("Link ASKV=%08x%08x", link0_aksv_0, link0_aksv_1);
+	/* Copy An and AKSV to byte arrays for transmission */
+	aksv[0] =  link0_aksv_0        & 0xFF;
+	aksv[1] = (link0_aksv_0 >> 8)  & 0xFF;
+	aksv[2] = (link0_aksv_0 >> 16) & 0xFF;
+	aksv[3] = (link0_aksv_0 >> 24) & 0xFF;
+	aksv[4] =  link0_aksv_1        & 0xFF;
+
+	/* Write An to offset 0x18 */
+	rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an,
+		(u16)sizeof(link0_an));
+	if (rc) {
+		pr_err("%s:An write failed\n", __func__);
+		return rc;
+	}
+	DBG("Link0-An=%08x%08x", link0_an[0], link0_an[1]);
+
+	/* Write AKSV to offset 0x10 */
+	rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5);
+	if (rc) {
+		pr_err("%s:AKSV write failed\n", __func__);
+		return rc;
+	}
+	DBG("Link0-AKSV=%02x%08x", link0_aksv_1 & 0xFF, link0_aksv_0);
+
+	return 0;
+}
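
The byte shuffling above serializes the 40-bit AKSV least-significant byte first for the DDC write, the same ordering hdmi_hdcp_recv_bksv() reverses when reading BKSV back. The packing in isolation, as standalone C:

    #include <stdint.h>
    #include <stdio.h>
    
    /* Mirrors the extraction done before the AKSV hdmi_ddc_write() call. */
    static void aksv_to_bytes(uint32_t lsb, uint32_t msb, uint8_t out[5])
    {
    	out[0] =  lsb        & 0xFF;
    	out[1] = (lsb >> 8)  & 0xFF;
    	out[2] = (lsb >> 16) & 0xFF;
    	out[3] = (lsb >> 24) & 0xFF;
    	out[4] =  msb        & 0xFF;
    }
    
    int main(void)
    {
    	uint8_t b[5];
    
    	aksv_to_bytes(0x03020100, 0x04, b);
    	printf("%02x %02x %02x %02x %02x\n",
    	       b[0], b[1], b[2], b[3], b[4]);	/* 00 01 02 03 04 */
    	return 0;
    }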
+
+static int hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u8 bksv[5];
+	u32 reg[2], data[2];
+
+	/* Read BKSV at offset 0x00 */
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5);
+	if (rc) {
+		pr_err("%s:BKSV read failed\n", __func__);
+		return rc;
+	}
+
+	hdcp_ctrl->bksv_lsb = bksv[0] | (bksv[1] << 8) |
+		(bksv[2] << 16) | (bksv[3] << 24);
+	hdcp_ctrl->bksv_msb = bksv[4];
+	DBG(":BKSV=%02x%08x", hdcp_ctrl->bksv_msb, hdcp_ctrl->bksv_lsb);
+
+	/* check there are 20 ones in BKSV */
+	if ((hweight32(hdcp_ctrl->bksv_lsb) + hweight32(hdcp_ctrl->bksv_msb))
+			!= 20) {
+		pr_err(": BKSV doesn't have 20 1's and 20 0's\n");
+		pr_err(": BKSV chk fail. BKSV=%02x%02x%02x%02x%02x\n",
+			bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]);
+		return -EINVAL;
+	}
+
+	/* Write BKSV read from sink to HDCP registers */
+	reg[0] = REG_HDMI_HDCP_RCVPORT_DATA0;
+	data[0] = hdcp_ctrl->bksv_lsb;
+	reg[1] = REG_HDMI_HDCP_RCVPORT_DATA1;
+	data[1] = hdcp_ctrl->bksv_msb;
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+
+	return rc;
+}
+
+static int hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg, data;
+	u8 bcaps;
+
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+	if (rc) {
+		pr_err("%s:BCAPS read failed\n", __func__);
+		return rc;
+	}
+	DBG("BCAPS=%02x", bcaps);
+
+	/* receiver (0), repeater (1) */
+	hdcp_ctrl->ds_type = (bcaps & BIT(6)) ? DS_REPEATER : DS_RECEIVER;
+
+	/* Write BCAPS to the hardware */
+	reg = REG_HDMI_HDCP_RCVPORT_DATA12;
+	data = (u32)bcaps;
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+
+	return rc;
+}
+
+static int hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	unsigned long flags;
+	int rc;
+
+	/* Wait for AKSV key and An ready */
+	rc = hdmi_hdcp_wait_key_an_ready(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: wait key and an ready failed\n", __func__);
+		return rc;
+	};
+
+	/* Read BCAPS and send to HDCP engine */
+	rc = hdmi_hdcp_recv_bcaps(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: read bcaps error, abort\n", __func__);
+		return rc;
+	}
+
+	/*
+	 * 1.1_Features turned off by default.
+	 * No need to write AInfo since 1.1_Features is disabled.
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4, 0);
+
+	/* Send AKSV and An to sink */
+	rc = hdmi_hdcp_send_aksv_an(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s:An/Aksv write failed\n", __func__);
+		return rc;
+	}
+
+	/* Read BKSV and send to HDCP engine*/
+	rc = hdmi_hdcp_recv_bksv(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s:BKSV Process failed\n", __func__);
+		return rc;
+	}
+
+	/* Enable HDCP interrupts and ack/clear any stale interrupts */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL,
+		HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK |
+		HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK |
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK |
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK |
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	return 0;
+}
+
+/* read R0' from sink and pass it to HDCP engine */
+static int hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	int rc = 0;
+	u8 buf[2];
+
+	/*
+	 * HDCP Compliance Test case 1A-01:
+	 * Wait here at least 100ms before reading R0'
+	 */
+	rc = hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV);
+	if (rc)
+		return rc;
+
+	/* Read R0' at offset 0x08 */
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2);
+	if (rc) {
+		pr_err("%s:R0' read failed\n", __func__);
+		return rc;
+	}
+	DBG("R0'=%02x%02x", buf[1], buf[0]);
+
+	/* Write R0' to HDCP registers and check to see if it is a match */
+	hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA2_0,
+		(((u32)buf[1]) << 8) | buf[0]);
+
+	return 0;
+}
+
+/* Wait for authenticating result: R0/R0' are matched or not */
+static int hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status;
+	int rc;
+
+	/* wait for hdcp irq, 10 sec should be long enough */
+	rc = hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV);
+	if (!rc) {
+		pr_err("%s: Wait Auth IRQ timeout\n", __func__);
+		return -ETIMEDOUT;
+	}
+
+	link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+	if (!(link0_status & HDMI_HDCP_LINK0_STATUS_RI_MATCHES)) {
+		pr_err("%s: Authentication Part I failed\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Enable HDCP Encryption */
+	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL,
+		HDMI_HDCP_CTRL_ENABLE |
+		HDMI_HDCP_CTRL_ENCRYPTION_ENABLE);
+
+	return 0;
+}
+
+static int hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl,
+	u16 *pbstatus)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	bool max_devs_exceeded = false, max_cascade_exceeded = false;
+	u32 repeater_cascade_depth = 0, down_stream_devices = 0;
+	u16 bstatus;
+	u8 buf[2];
+
+	/* Read BSTATUS at offset 0x41 */
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2);
+	if (rc) {
+		pr_err("%s: BSTATUS read failed\n", __func__);
+		goto error;
+	}
+	*pbstatus = bstatus = (buf[1] << 8) | buf[0];
+
+
+	down_stream_devices = bstatus & 0x7F;
+	repeater_cascade_depth = (bstatus >> 8) & 0x7;
+	max_devs_exceeded = (bstatus & BIT(7)) ? true : false;
+	max_cascade_exceeded = (bstatus & BIT(11)) ? true : false;
+
+	if (down_stream_devices == 0) {
+		/*
+		 * If no downstream devices are attached to the repeater
+		 * then part II fails.
+		 * todo: The other approach would be to continue PART II.
+		 */
+		pr_err("%s: No downstream devices\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * HDCP Compliance 1B-05:
+	 * Check if no. of devices connected to repeater
+	 * exceed max_devices_connected from bit 7 of Bstatus.
+	 */
+	if (max_devs_exceeded) {
+		pr_err("%s: no. of devs connected exceeds max allowed",
+			__func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * HDCP Compliance 1B-06:
+	 * Check if no. of cascade connected to repeater
+	 * exceed max_cascade_connected from bit 11 of Bstatus.
+	 */
+	if (max_cascade_exceeded) {
+		pr_err("%s: no. of cascade conn exceeds max allowed",
+			__func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+error:
+	hdcp_ctrl->dev_count = down_stream_devices;
+	hdcp_ctrl->max_cascade_exceeded = max_cascade_exceeded;
+	hdcp_ctrl->max_dev_exceeded = max_devs_exceeded;
+	hdcp_ctrl->depth = repeater_cascade_depth;
+	return rc;
+}
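
BSTATUS, decoded above, packs the repeater topology into one 16-bit word: bits 6:0 carry the downstream device count, bit 7 MAX_DEVS_EXCEEDED, bits 10:8 the cascade depth, and bit 11 MAX_CASCADE_EXCEEDED. The same field extraction as a standalone check:

    #include <stdint.h>
    #include <stdio.h>
    
    int main(void)
    {
    	uint16_t bstatus = 0x0A83;	/* 3 devices, depth 2, bits 7/11 set */
    
    	printf("devices: %u\n", bstatus & 0x7F);
    	printf("depth: %u\n", (bstatus >> 8) & 0x7);
    	printf("max devs exceeded: %u\n", !!(bstatus & (1 << 7)));
    	printf("max cascade exceeded: %u\n", !!(bstatus & (1 << 11)));
    	return 0;
    }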
+
+static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
+	struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg, data;
+	u32 timeout_count;
+	u16 bstatus;
+	u8 bcaps;
+
+	/*
+	 * Wait until READY bit is set in BCAPS, as per HDCP specifications
+	 * maximum permitted time to check for READY bit is five seconds.
+	 */
+	timeout_count = 100;
+	do {
+		/* Read BCAPS at offset 0x40 */
+		rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+		if (rc) {
+			pr_err("%s: BCAPS read failed\n", __func__);
+			return rc;
+		}
+
+		if (bcaps & BIT(5))
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Wait KSV fifo ready timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	rc = hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus);
+	if (rc) {
+		pr_err("%s: bstatus error\n", __func__);
+		return rc;
+	}
+
+	/* Write BSTATUS and BCAPS to HDCP registers */
+	reg = REG_HDMI_HDCP_RCVPORT_DATA12;
+	data = bcaps | (bstatus << 8);
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+	if (rc) {
+		pr_err("%s: BSTATUS write failed\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * hdcp authenticating part 2: 2nd
+ * read ksv fifo from sink
+ * transfer V' from sink to HDCP engine
+ * reset SHA engine
+ */
+static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	int rc = 0;
+	struct hdmi_hdcp_reg_data reg_data[] = {
+		{REG_HDMI_HDCP_RCVPORT_DATA7,  0x20, "V' H0"},
+		{REG_HDMI_HDCP_RCVPORT_DATA8,  0x24, "V' H1"},
+		{REG_HDMI_HDCP_RCVPORT_DATA9,  0x28, "V' H2"},
+		{REG_HDMI_HDCP_RCVPORT_DATA10, 0x2C, "V' H3"},
+		{REG_HDMI_HDCP_RCVPORT_DATA11, 0x30, "V' H4"},
+	};
+	struct hdmi_hdcp_reg_data *rd;
+	u32 size = ARRAY_SIZE(reg_data);
+	u32 reg[ARRAY_SIZE(reg_data)];
+	u32 data[ARRAY_SIZE(reg_data)];
+	int i;
+
+	for (i = 0; i < size; i++) {
+		rd = &reg_data[i];
+		rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR,
+			rd->off, (u8 *)&data[i], (u16)sizeof(data[i]));
+		if (rc) {
+			pr_err("%s: Read %s failed\n", __func__, rd->name);
+			goto error;
+		}
+
+		DBG("%s =%x", rd->name, data[i]);
+		reg[i] = reg_data[i].reg_id;
+	}
+
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size);
+
+error:
+	return rc;
+}
+
+static int hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 ksv_bytes;
+
+	ksv_bytes = 5 * hdcp_ctrl->dev_count;
+
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43,
+		hdcp_ctrl->ksv_list, ksv_bytes);
+	if (rc)
+		pr_err("%s: KSV FIFO read failed\n", __func__);
+
+	return rc;
+}
+
+static int hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	u32 reg[2], data[2];
+	u32 rc = 0;
+
+	reg[0] = REG_HDMI_HDCP_SHA_CTRL;
+	data[0] = HDCP_REG_ENABLE;
+	reg[1] = REG_HDMI_HDCP_SHA_CTRL;
+	data[1] = HDCP_REG_DISABLE;
+
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+
+	return rc;
+}
+
+static int hdmi_hdcp_auth_part2_recv_ksv_fifo(
+	struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	u32 timeout_count;
+
+	/*
+	 * Read KSV FIFO over DDC
+	 * Key Selection vector FIFO Used to pull downstream KSVs
+	 * from HDCP Repeaters.
+	 * All bytes (DEVICE_COUNT * 5) must be read in a single,
+	 * auto incrementing access.
+	 * All bytes read as 0x00 for HDCP Receivers that are not
+	 * HDCP Repeaters (REPEATER == 0).
+	 */
+	timeout_count = 100;
+	do {
+		rc = hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl);
+		if (!rc)
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Recv ksv fifo timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	rc = hdmi_hdcp_transfer_v_h(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: transfer V failed\n", __func__);
+		return rc;
+	}
+
+	/* reset SHA engine before write ksv fifo */
+	rc = hdmi_hdcp_reset_sha_engine(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: fail to reset sha engine\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * Write KSV FIFO to HDCP_SHA_DATA.
+ * This is done 1 byte at time starting with the LSB.
+ * Once 64 bytes have been written, we need to poll for
+ * HDCP_SHA_BLOCK_DONE before writing any further
+ * If the last byte is written, we need to poll for
+ * HDCP_SHA_COMP_DONE to wait until HW finish
+ */
+static int hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int i;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 ksv_bytes, last_byte = 0;
+	u8 *ksv_fifo = NULL;
+	u32 reg_val, data, reg;
+	u32 rc = 0;
+
+	ksv_bytes = 5 * hdcp_ctrl->dev_count;
+
+	/* Check if need to wait for HW completion */
+	if (hdcp_ctrl->ksv_fifo_w_index) {
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_SHA_STATUS);
+		DBG("HDCP_SHA_STATUS=%08x", reg_val);
+		if (hdcp_ctrl->ksv_fifo_w_index == ksv_bytes) {
+			/* check COMP_DONE if last write */
+			if (reg_val & HDMI_HDCP_SHA_STATUS_COMP_DONE) {
+				DBG("COMP_DONE");
+				return 0;
+			} else {
+				return -EAGAIN;
+			}
+		} else {
+			/* check BLOCK_DONE if not last write */
+			if (!(reg_val & HDMI_HDCP_SHA_STATUS_BLOCK_DONE))
+				return -EAGAIN;
+
+			DBG("BLOCK_DONE");
+		}
+	}
+
+	ksv_bytes -= hdcp_ctrl->ksv_fifo_w_index;
+	if (ksv_bytes <= 64)
+		last_byte = 1;
+	else
+		ksv_bytes = 64;
+
+	ksv_fifo = hdcp_ctrl->ksv_list;
+	ksv_fifo += hdcp_ctrl->ksv_fifo_w_index;
+
+	for (i = 0; i < ksv_bytes; i++) {
+		/* Write KSV byte and set DONE bit[0] for last byte*/
+		reg_val = ksv_fifo[i] << 16;
+		if ((i == (ksv_bytes - 1)) && last_byte)
+			reg_val |= HDMI_HDCP_SHA_DATA_DONE;
+
+		reg = REG_HDMI_HDCP_SHA_DATA;
+		data = reg_val;
+		rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+
+		if (rc)
+			return rc;
+	}
+
+	hdcp_ctrl->ksv_fifo_w_index += ksv_bytes;
+
+	/*
+	 *return -EAGAIN to notify caller to wait for COMP_DONE or BLOCK_DONE
+	 */
+	return -EAGAIN;
+}
1185
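/*
 * Worked example: at the HDCP 1.x maximum of 127 downstream devices the
 * KSV list is 127 * 5 = 635 bytes, which the function above pushes as
 * nine full 64-byte SHA blocks followed by a final 59-byte write whose
 * last byte carries HDMI_HDCP_SHA_DATA_DONE.
 */
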
/* write the KSV FIFO into the HDCP engine */
static int hdmi_hdcp_auth_part2_write_ksv_fifo(
	struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
	int rc;
	u32 timeout_count;

	hdcp_ctrl->ksv_fifo_w_index = 0;
	timeout_count = 100;
	do {
		rc = hdmi_hdcp_write_ksv_fifo(hdcp_ctrl);
		if (!rc)
			break;

		if (rc != -EAGAIN)
			return rc;

		timeout_count--;
		if (!timeout_count) {
			pr_err("%s: Write KSV fifo timed out\n", __func__);
			return -ETIMEDOUT;
		}

		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
		if (rc)
			return rc;
	} while (1);

	return 0;
}

static int hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
	int rc = 0;
	struct hdmi *hdmi = hdcp_ctrl->hdmi;
	u32 link0_status;
	u32 timeout_count = 100;

	do {
		link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
		if (link0_status & HDMI_HDCP_LINK0_STATUS_V_MATCHES)
			break;

		timeout_count--;
		if (!timeout_count) {
			pr_err("%s: HDCP V match timed out\n", __func__);
			return -ETIMEDOUT;
		}

		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
		if (rc)
			return rc;
	} while (1);

	return 0;
}

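/*
 * HDMI_HDCP_LINK0_STATUS_V_MATCHES is raised by the HDCP block once the
 * SHA-1 it computed over the KSV list written in
 * hdmi_hdcp_write_ksv_fifo() equals the V' values transferred in
 * hdmi_hdcp_transfer_v_h(), which completes Part II of authentication.
 */
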
static void hdmi_hdcp_auth_work(struct work_struct *work)
{
	struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
		struct hdmi_hdcp_ctrl, hdcp_auth_work);
	int rc;

	rc = hdmi_hdcp_auth_prepare(hdcp_ctrl);
	if (rc) {
		pr_err("%s: auth prepare failed %d\n", __func__, rc);
		goto end;
	}

	/* HDCP Part I */
	rc = hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl);
	if (rc) {
		pr_err("%s: key exchange failed %d\n", __func__, rc);
		goto end;
	}

	rc = hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl);
	if (rc) {
		pr_err("%s: receive r0 failed %d\n", __func__, rc);
		goto end;
	}

	rc = hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl);
	if (rc) {
		pr_err("%s: verify r0 failed %d\n", __func__, rc);
		goto end;
	}
	pr_info("%s: Authentication Part I successful\n", __func__);
	if (hdcp_ctrl->ds_type == DS_RECEIVER)
		goto end;

	/* HDCP Part II (repeaters only) */
	rc = hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl);
	if (rc) {
		pr_err("%s: wait ksv fifo ready failed %d\n", __func__, rc);
		goto end;
	}

	rc = hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl);
	if (rc) {
		pr_err("%s: recv ksv fifo failed %d\n", __func__, rc);
		goto end;
	}

	rc = hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl);
	if (rc) {
		pr_err("%s: write ksv fifo failed %d\n", __func__, rc);
		goto end;
	}

	rc = hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl);
	if (rc)
		pr_err("%s: check v match failed %d\n", __func__, rc);

end:
	if (rc == -ECANCELED) {
		pr_info("%s: hdcp authentication canceled\n", __func__);
	} else if (rc == -ENOTSUPP) {
		pr_info("%s: hdcp is not supported\n", __func__);
	} else if (rc) {
		pr_err("%s: hdcp authentication failed\n", __func__);
		hdmi_hdcp_auth_fail(hdcp_ctrl);
	} else {
		hdmi_hdcp_auth_done(hdcp_ctrl);
	}
}

void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
	struct hdmi *hdmi = hdcp_ctrl->hdmi;
	u32 reg_val;
	unsigned long flags;

	if ((HDCP_STATE_INACTIVE != hdcp_ctrl->hdcp_state) ||
		(HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
		DBG("still active or activating or no aksv. returning");
		return;
	}

	/* clear HDMI Encrypt */
	spin_lock_irqsave(&hdmi->reg_lock, flags);
	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
	reg_val &= ~HDMI_CTRL_ENCRYPTED;
	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
	spin_unlock_irqrestore(&hdmi->reg_lock, flags);

	hdcp_ctrl->auth_event = 0;
	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
	hdcp_ctrl->auth_retries = 0;
	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
}

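/*
 * hdmi_hdcp_on() only arms the state machine; the slow, DDC-bound
 * authentication itself runs asynchronously in hdmi_hdcp_auth_work()
 * on the hdmi workqueue, so callers are never blocked on DDC traffic.
 */
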
void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
	struct hdmi *hdmi = hdcp_ctrl->hdmi;
	unsigned long flags;
	u32 reg_val;

	if ((HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state) ||
		(HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
		DBG("hdcp inactive or no aksv. returning");
		return;
	}

	/*
	 * Disable HPD circuitry.
	 * This is needed to reset the HDCP cipher engine so that when we
	 * attempt a re-authentication, the HW clears the AN0_READY and
	 * AN1_READY bits in the HDMI_HDCP_LINK0_STATUS register.
	 */
	spin_lock_irqsave(&hdmi->reg_lock, flags);
	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
	reg_val &= ~HDMI_HPD_CTRL_ENABLE;
	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);

	/*
	 * Disable HDCP interrupts.
	 * We also need to set the state to inactive here so that any
	 * ongoing reauth work will know that the HDCP session has been
	 * turned off.
	 */
	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
	spin_unlock_irqrestore(&hdmi->reg_lock, flags);

	/*
	 * Cancel any pending auth/reauth attempts.
	 * If one is ongoing, this will wait for it to finish.
	 * No more reauthentication attempts will be scheduled since we
	 * set the current state to inactive.
	 */
	set_bit(AUTH_ABORT_EV, &hdcp_ctrl->auth_event);
	wake_up_all(&hdcp_ctrl->auth_event_queue);
	cancel_work_sync(&hdcp_ctrl->hdcp_auth_work);
	cancel_work_sync(&hdcp_ctrl->hdcp_reauth_work);

	hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
		HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);

	/* Disable encryption and disable the HDCP block */
	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);

	spin_lock_irqsave(&hdmi->reg_lock, flags);
	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
	reg_val &= ~HDMI_CTRL_ENCRYPTED;
	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);

	/* Enable HPD circuitry */
	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
	reg_val |= HDMI_HPD_CTRL_ENABLE;
	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
	spin_unlock_irqrestore(&hdmi->reg_lock, flags);

	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;

	DBG("HDCP: Off");
}

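/*
 * Ordering note: AUTH_ABORT_EV is set and the waitqueue woken *before*
 * cancel_work_sync(), so an auth work currently sleeping in
 * hdmi_hdcp_msleep() bails out promptly with -ECANCELED instead of
 * making the cancel wait out the full poll timeout.
 */
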
struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi)
{
	struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;

	if (!hdmi->qfprom_mmio) {
		pr_err("%s: HDCP is not supported without qfprom\n",
			__func__);
		return ERR_PTR(-EINVAL);
	}

	hdcp_ctrl = kzalloc(sizeof(*hdcp_ctrl), GFP_KERNEL);
	if (!hdcp_ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work);
	INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, hdmi_hdcp_reauth_work);
	init_waitqueue_head(&hdcp_ctrl->auth_event_queue);
	hdcp_ctrl->hdmi = hdmi;
	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
	hdcp_ctrl->aksv_valid = false;

	hdcp_ctrl->tz_hdcp = qcom_scm_hdcp_available();

	return hdcp_ctrl;
}

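/*
 * Sketch of the expected wiring from the HDMI driver (call sites
 * assumed, error handling elided):
 *
 *	hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi);
 *	if (IS_ERR(hdmi->hdcp_ctrl))
 *		hdmi->hdcp_ctrl = NULL;		(run without HDCP)
 *
 *	hdmi_hdcp_on(hdmi->hdcp_ctrl);		(on hotplug/bridge enable)
 *	hdmi_hdcp_off(hdmi->hdcp_ctrl);		(on disable)
 *	hdmi_hdcp_destroy(hdmi);		(on teardown)
 */
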
void hdmi_hdcp_destroy(struct hdmi *hdmi)
{
	if (hdmi && hdmi->hdcp_ctrl) {
		kfree(hdmi->hdcp_ctrl);
		hdmi->hdcp_ctrl = NULL;
	}
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index 6997ec636c6d..3a01cb5051e2 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -426,57 +426,6 @@ static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
 	kfree(phy_8960);
 }
 
-static void hdmi_phy_8960_reset(struct hdmi_phy *phy)
-{
-	struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
-	struct hdmi *hdmi = phy_8960->hdmi;
-	unsigned int val;
-
-	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-
-	msleep(100);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-}
-
 static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
 		unsigned long int pixclock)
 {
@@ -511,7 +460,6 @@ static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
 
 static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
 		.destroy = hdmi_phy_8960_destroy,
-		.reset = hdmi_phy_8960_reset,
 		.powerup = hdmi_phy_8960_powerup,
 		.powerdown = hdmi_phy_8960_powerdown,
 };
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
index 391433c1af7c..cb01421ae1e4 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -29,37 +29,6 @@ static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy)
 	kfree(phy_8x60);
 }
 
-static void hdmi_phy_8x60_reset(struct hdmi_phy *phy)
-{
-	struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
-	struct hdmi *hdmi = phy_8x60->hdmi;
-	unsigned int val;
-
-	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	msleep(100);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	}
-}
-
 static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
 		unsigned long int pixclock)
 {
@@ -182,7 +151,6 @@ static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
 
 static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
 		.destroy = hdmi_phy_8x60_destroy,
-		.reset = hdmi_phy_8x60_reset,
 		.powerup = hdmi_phy_8x60_powerup,
 		.powerdown = hdmi_phy_8x60_powerdown,
 };
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
index 59fa6cdacb2a..56ab8917ee9a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -19,7 +19,6 @@
 
 struct hdmi_phy_8x74 {
 	struct hdmi_phy base;
-	struct hdmi *hdmi;
 	void __iomem *mmio;
 };
 #define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base)
@@ -41,59 +40,6 @@ static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy)
 	kfree(phy_8x74);
 }
 
-static void hdmi_phy_8x74_reset(struct hdmi_phy *phy)
-{
-	struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
-	struct hdmi *hdmi = phy_8x74->hdmi;
-	unsigned int val;
-
-	/* NOTE that HDMI_PHY_CTL is in core mmio, not phy mmio: */
-
-	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-
-	msleep(100);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-}
-
 static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
 		unsigned long int pixclock)
 {
@@ -117,7 +63,6 @@ static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy)
 
 static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = {
 		.destroy = hdmi_phy_8x74_destroy,
-		.reset = hdmi_phy_8x74_reset,
 		.powerup = hdmi_phy_8x74_powerup,
 		.powerdown = hdmi_phy_8x74_powerdown,
 };
@@ -138,8 +83,6 @@ struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi)
 
 	phy->funcs = &hdmi_phy_8x74_funcs;
 
-	phy_8x74->hdmi = hdmi;
-
 	/* for 8x74, the phy mmio is mapped separately: */
 	phy_8x74->mmio = msm_ioremap(hdmi->pdev,
 			"phy_physical", "HDMI_8x74");
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index 978c3f70872a..2aa23b98f8aa 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
 
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 153fc487d683..74b86734fef5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 4dc158ed2e95..6ac9aa165768 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -682,7 +682,5 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
 	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
 	plane->crtc = crtc;
 
-	mdp4_plane_install_properties(plane, &crtc->base);
-
 	return crtc;
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 7369ee7f0c55..5ed38cf548a1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -19,8 +19,11 @@
 #include "msm_drv.h"
 #include "mdp4_kms.h"
 
-void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask)
 {
+	mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_CLEAR,
+			irqmask ^ (irqmask & old_irqmask));
 	mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
 }
 
@@ -68,9 +71,10 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
 	struct drm_device *dev = mdp4_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	unsigned int id;
-	uint32_t status;
+	uint32_t status, enable;
 
-	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
+	enable = mdp4_read(mdp4_kms, REG_MDP4_INTR_ENABLE);
+	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS) & enable;
 	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
 
 	VERB("status=%08x", status);
@@ -86,13 +90,22 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
 
 int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+	mdp4_enable(mdp4_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp4_crtc_vblank(crtc), true);
+	mdp4_disable(mdp4_kms);
+
 	return 0;
 }
 
 void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+	mdp4_enable(mdp4_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp4_crtc_vblank(crtc), false);
+	mdp4_disable(mdp4_kms);
 }
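
Aside on the mdp4_set_irqmask() hunk above: `irqmask ^ (irqmask & old_irqmask)` is simply the set of bits being newly enabled (equivalent to `irqmask & ~old_irqmask`), so any stale latched status for those interrupt sources is acked via INTR_CLEAR before they are unmasked in INTR_ENABLE.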
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 531e4acc2a87..077f7521a971 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -241,22 +241,37 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
241} 241}
242 242
243#ifdef CONFIG_OF 243#ifdef CONFIG_OF
244static struct drm_panel *detect_panel(struct drm_device *dev, const char *name) 244static struct drm_panel *detect_panel(struct drm_device *dev)
245{ 245{
246 struct device_node *n; 246 struct device_node *endpoint, *panel_node;
247 struct device_node *np = dev->dev->of_node;
247 struct drm_panel *panel = NULL; 248 struct drm_panel *panel = NULL;
248 249
249 n = of_parse_phandle(dev->dev->of_node, name, 0); 250 endpoint = of_graph_get_next_endpoint(np, NULL);
250 if (n) { 251 if (!endpoint) {
251 panel = of_drm_find_panel(n); 252 dev_err(dev->dev, "no valid endpoint\n");
252 if (!panel) 253 return ERR_PTR(-ENODEV);
253 panel = ERR_PTR(-EPROBE_DEFER); 254 }
255
256 panel_node = of_graph_get_remote_port_parent(endpoint);
257 if (!panel_node) {
258 dev_err(dev->dev, "no valid panel node\n");
259 of_node_put(endpoint);
260 return ERR_PTR(-ENODEV);
261 }
262
263 of_node_put(endpoint);
264
265 panel = of_drm_find_panel(panel_node);
266 if (!panel) {
267 of_node_put(panel_node);
268 return ERR_PTR(-EPROBE_DEFER);
254 } 269 }
255 270
256 return panel; 271 return panel;
257} 272}
258#else 273#else
259static struct drm_panel *detect_panel(struct drm_device *dev, const char *name) 274static struct drm_panel *detect_panel(struct drm_device *dev)
260{ 275{
261 // ??? maybe use a module param to specify which panel is attached? 276 // ??? maybe use a module param to specify which panel is attached?
262} 277}
@@ -294,7 +309,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
 	 * Setup the LCDC/LVDS path: RGB2 -> DMA_P -> LCDC -> LVDS:
 	 */
 
-	panel = detect_panel(dev, "qcom,lvds-panel");
+	panel = detect_panel(dev);
 	if (IS_ERR(panel)) {
 		ret = PTR_ERR(panel);
 		dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret);
@@ -527,6 +542,11 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 		goto fail;
 	}
 
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.max_width = 2048;
+	dev->mode_config.max_height = 2048;
+
 	return kms;
 
 fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index c1ecb9d6bdef..8a7f6e1e2bca 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -167,7 +167,8 @@ static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer,
 int mdp4_disable(struct mdp4_kms *mdp4_kms);
 int mdp4_enable(struct mdp4_kms *mdp4_kms);
 
-void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask);
 void mdp4_irq_preinstall(struct msm_kms *kms);
 int mdp4_irq_postinstall(struct msm_kms *kms);
 void mdp4_irq_uninstall(struct msm_kms *kms);
@@ -175,29 +176,24 @@ irqreturn_t mdp4_irq(struct msm_kms *kms);
 int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 
-static inline bool pipe_supports_yuv(enum mdp4_pipe pipe)
+static inline uint32_t mdp4_pipe_caps(enum mdp4_pipe pipe)
 {
 	switch (pipe) {
 	case VG1:
 	case VG2:
 	case VG3:
 	case VG4:
-		return true;
+		return MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
+	case RGB1:
+	case RGB2:
+	case RGB3:
+		return MDP_PIPE_CAP_SCALE;
 	default:
-		return false;
+		return 0;
 	}
 }
 
-static inline
-uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
-		uint32_t max_formats)
-{
-	return mdp_get_formats(pixel_formats, max_formats,
-		!pipe_supports_yuv(pipe_id));
-}
-
-void mdp4_plane_install_properties(struct drm_plane *plane,
-		struct drm_mode_object *obj);
 enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp4_plane_init(struct drm_device *dev,
 		enum mdp4_pipe pipe_id, bool private_plane);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index c04843376c54..4cd6e721aa0a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -346,8 +346,10 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
 
 	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
 
-	if (panel)
+	if (panel) {
 		drm_panel_disable(panel);
+		drm_panel_unprepare(panel);
+	}
 
 	/*
 	 * Wait for a vsync so we know the ENABLE=0 latched before
@@ -412,8 +414,10 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
 	if (ret)
 		dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
 
-	if (panel)
+	if (panel) {
+		drm_panel_prepare(panel);
 		drm_panel_enable(panel);
+	}
 
 	setup_phy(encoder);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 247a424445f7..e9dee367b597 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -26,6 +26,7 @@ struct mdp4_plane {
 
 	enum mdp4_pipe pipe;
 
+	uint32_t caps;
 	uint32_t nformats;
 	uint32_t formats[32];
 
@@ -74,7 +75,7 @@ static void mdp4_plane_destroy(struct drm_plane *plane)
 }
 
 /* helper to install properties which are common to planes and crtcs */
-void mdp4_plane_install_properties(struct drm_plane *plane,
+static void mdp4_plane_install_properties(struct drm_plane *plane,
 		struct drm_mode_object *obj)
 {
 	// XXX
@@ -382,9 +383,11 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
 
 	mdp4_plane->pipe = pipe_id;
 	mdp4_plane->name = pipe_names[pipe_id];
+	mdp4_plane->caps = mdp4_pipe_caps(pipe_id);
 
-	mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
-			ARRAY_SIZE(mdp4_plane->formats));
+	mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats,
+			ARRAY_SIZE(mdp4_plane->formats),
+			!pipe_supports_yuv(mdp4_plane->caps));
 
 	type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
 	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 50e17527e2e5..3469f50d5590 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -381,49 +381,49 @@ static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x0
 static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
 #define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007
 #define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
 }
 #define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038
 #define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
 }
 #define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0
 #define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00
 #define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000
 #define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000
 #define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
 }
 #define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000
 #define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18
-static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
 }
 #define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000
 #define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21
-static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
 }
@@ -431,13 +431,13 @@ static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
 #define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000
 #define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000
 #define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000
 #define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK;
 }
@@ -499,6 +499,44 @@ static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __o
 
 static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); }
 
+static inline uint32_t __offset_LAYER_EXT(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return 0x00000040;
+		case 1: return 0x00000044;
+		case 2: return 0x00000048;
+		case 3: return 0x0000004c;
+		case 4: return 0x00000050;
+		case 5: return 0x00000054;
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+#define MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3 0x00000001
+#define MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3 0x00000004
+#define MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3 0x00000010
+#define MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3 0x00000040
+#define MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3 0x00000100
+#define MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3 0x00000400
+#define MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3 0x00001000
+#define MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3 0x00004000
+#define MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3 0x00010000
+#define MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3 0x00040000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK 0x00f00000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT 20
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR0(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK;
+}
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK 0x3c000000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT 26
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK;
+}
+
 static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
 {
 	switch (idx) {
@@ -803,11 +841,11 @@ static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
 }
 #define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
 #define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
-#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00180000
-#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT 19
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(enum mdp_fetch_type val)
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK 0x00180000
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT 19
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(enum mdp_fetch_type val)
 {
-	return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK;
+	return ((val) << MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT) & MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK;
 }
 #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000
 #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23
@@ -897,41 +935,41 @@ static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
 static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); }
 #define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
 #define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT 8
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK 0x00000300
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT 8
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK 0x00000c00
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT 10
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK 0x00000c00
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT 10
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK 0x00003000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT 12
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK 0x00003000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT 12
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK 0x0000c000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT 14
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK 0x0000c000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT 14
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK 0x00030000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT 16
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK 0x00030000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT 16
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK 0x000c0000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT 18
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK 0x000c0000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT 18
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK;
 }
 
 static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); }
@@ -984,9 +1022,22 @@ static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x000000
 
 static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); }
 
-static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t __offset_BLEND(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return 0x00000020;
+		case 1: return 0x00000050;
+		case 2: return 0x00000080;
+		case 3: return 0x000000b0;
+		case 4: return 0x00000230;
+		case 5: return 0x00000260;
+		case 6: return 0x00000290;
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
 #define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
 #define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
 static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
@@ -1008,25 +1059,25 @@ static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
 #define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
 #define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000002c + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000030 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000034 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000038 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000003c + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000001c + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000040 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000044 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + __offset_BLEND(i1); }
 
 static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); }
 #define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK 0x0000ffff
@@ -1260,6 +1311,13 @@ static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x000000
 static inline uint32_t __offset_WB(uint32_t idx)
 {
 	switch (idx) {
+#if 0 /* TEMPORARY until patch that adds wb.base[] is merged */
+		case 0: return (mdp5_cfg->wb.base[0]);
+		case 1: return (mdp5_cfg->wb.base[1]);
+		case 2: return (mdp5_cfg->wb.base[2]);
+		case 3: return (mdp5_cfg->wb.base[3]);
+		case 4: return (mdp5_cfg->wb.base[4]);
+#endif
 		default: return INVALID_IDX(idx);
 	}
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index 8b9a7931b162..a1e26f23c7cc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -22,7 +22,76 @@ struct mdp5_cfg_handler {
 /* mdp5_cfg must be exposed (used in mdp5.xml.h) */
 const struct mdp5_cfg_hw *mdp5_cfg = NULL;
 
-const struct mdp5_cfg_hw msm8x74_config = {
+const struct mdp5_cfg_hw msm8x74v1_config = {
+	.name = "msm8x74v1",
+	.mdp = {
+		.count = 1,
+		.base = { 0x00100 },
+	},
+	.smp = {
+		.mmb_count = 22,
+		.mmb_size = 4096,
+		.clients = {
+			[SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7,
+			[SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+			[SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
+		},
+	},
+	.ctl = {
+		.count = 5,
+		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+		.flush_hw_mask = 0x0003ffff,
+	},
+	.pipe_vig = {
+		.count = 3,
+		.base = { 0x01200, 0x01600, 0x01a00 },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			MDP_PIPE_CAP_SCALE |
+			MDP_PIPE_CAP_CSC |
+			0,
+	},
+	.pipe_rgb = {
+		.count = 3,
+		.base = { 0x01e00, 0x02200, 0x02600 },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			MDP_PIPE_CAP_SCALE |
+			0,
+	},
+	.pipe_dma = {
+		.count = 2,
+		.base = { 0x02a00, 0x02e00 },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			0,
+	},
+	.lm = {
+		.count = 5,
+		.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+		.nb_stages = 5,
+	},
+	.dspp = {
+		.count = 3,
+		.base = { 0x04600, 0x04a00, 0x04e00 },
+	},
+	.pp = {
+		.count = 3,
+		.base = { 0x21b00, 0x21c00, 0x21d00 },
+	},
+	.intf = {
+		.base = { 0x21100, 0x21300, 0x21500, 0x21700 },
+		.connect = {
+			[0] = INTF_eDP,
+			[1] = INTF_DSI,
+			[2] = INTF_DSI,
+			[3] = INTF_HDMI,
+		},
+	},
+	.max_clk = 200000000,
+};
+
+const struct mdp5_cfg_hw msm8x74v2_config = {
 	.name = "msm8x74",
 	.mdp = {
 		.count = 1,
@@ -45,19 +114,27 @@ const struct mdp5_cfg_hw msm8x74_config = {
45 .pipe_vig = { 114 .pipe_vig = {
46 .count = 3, 115 .count = 3,
47 .base = { 0x01200, 0x01600, 0x01a00 }, 116 .base = { 0x01200, 0x01600, 0x01a00 },
117 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
118 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
119 MDP_PIPE_CAP_DECIMATION,
48 }, 120 },
49 .pipe_rgb = { 121 .pipe_rgb = {
50 .count = 3, 122 .count = 3,
51 .base = { 0x01e00, 0x02200, 0x02600 }, 123 .base = { 0x01e00, 0x02200, 0x02600 },
124 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
125 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
52 }, 126 },
53 .pipe_dma = { 127 .pipe_dma = {
54 .count = 2, 128 .count = 2,
55 .base = { 0x02a00, 0x02e00 }, 129 .base = { 0x02a00, 0x02e00 },
130 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
56 }, 131 },
57 .lm = { 132 .lm = {
58 .count = 5, 133 .count = 5,
59 .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 }, 134 .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
60 .nb_stages = 5, 135 .nb_stages = 5,
136 .max_width = 2048,
137 .max_height = 0xFFFF,
61 }, 138 },
62 .dspp = { 139 .dspp = {
63 .count = 3, 140 .count = 3,
@@ -65,7 +142,7 @@ const struct mdp5_cfg_hw msm8x74_config = {
65 }, 142 },
66 .ad = { 143 .ad = {
67 .count = 2, 144 .count = 2,
68 .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */ 145 .base = { 0x13100, 0x13300 },
69 }, 146 },
70 .pp = { 147 .pp = {
71 .count = 3, 148 .count = 3,
@@ -113,19 +190,27 @@ const struct mdp5_cfg_hw apq8084_config = {
113 .pipe_vig = { 190 .pipe_vig = {
114 .count = 4, 191 .count = 4,
115 .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 }, 192 .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
193 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
194 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
195 MDP_PIPE_CAP_DECIMATION,
116 }, 196 },
117 .pipe_rgb = { 197 .pipe_rgb = {
118 .count = 4, 198 .count = 4,
119 .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 }, 199 .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
200 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
201 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
120 }, 202 },
121 .pipe_dma = { 203 .pipe_dma = {
122 .count = 2, 204 .count = 2,
123 .base = { 0x03200, 0x03600 }, 205 .base = { 0x03200, 0x03600 },
206 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
124 }, 207 },
125 .lm = { 208 .lm = {
126 .count = 6, 209 .count = 6,
127 .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 }, 210 .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
128 .nb_stages = 5, 211 .nb_stages = 5,
212 .max_width = 2048,
213 .max_height = 0xFFFF,
129 }, 214 },
130 .dspp = { 215 .dspp = {
131 .count = 4, 216 .count = 4,
@@ -174,19 +259,27 @@ const struct mdp5_cfg_hw msm8x16_config = {
174 .pipe_vig = { 259 .pipe_vig = {
175 .count = 1, 260 .count = 1,
176 .base = { 0x05000 }, 261 .base = { 0x05000 },
262 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
263 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
264 MDP_PIPE_CAP_DECIMATION,
177 }, 265 },
178 .pipe_rgb = { 266 .pipe_rgb = {
179 .count = 2, 267 .count = 2,
180 .base = { 0x15000, 0x17000 }, 268 .base = { 0x15000, 0x17000 },
269 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
270 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
181 }, 271 },
182 .pipe_dma = { 272 .pipe_dma = {
183 .count = 1, 273 .count = 1,
184 .base = { 0x25000 }, 274 .base = { 0x25000 },
275 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
185 }, 276 },
186 .lm = { 277 .lm = {
187 .count = 2, /* LM0 and LM3 */ 278 .count = 2, /* LM0 and LM3 */
188 .base = { 0x45000, 0x48000 }, 279 .base = { 0x45000, 0x48000 },
189 .nb_stages = 5, 280 .nb_stages = 5,
281 .max_width = 2048,
282 .max_height = 0xFFFF,
190 }, 283 },
191 .dspp = { 284 .dspp = {
192 .count = 1, 285 .count = 1,
@@ -203,14 +296,91 @@ const struct mdp5_cfg_hw msm8x16_config = {
203 .max_clk = 320000000, 296 .max_clk = 320000000,
204}; 297};
205 298
299const struct mdp5_cfg_hw msm8x94_config = {
300 .name = "msm8x94",
301 .mdp = {
302 .count = 1,
303 .base = { 0x01000 },
304 },
305 .smp = {
306 .mmb_count = 44,
307 .mmb_size = 8192,
308 .clients = {
309 [SSPP_VIG0] = 1, [SSPP_VIG1] = 4,
310 [SSPP_VIG2] = 7, [SSPP_VIG3] = 19,
311 [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
312 [SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
313 [SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
314 },
315 .reserved_state[0] = GENMASK(23, 0), /* first 24 MMBs */
316 .reserved = {
317 [1] = 1, [4] = 1, [7] = 1, [19] = 1,
318 [16] = 5, [17] = 5, [18] = 5, [22] = 5,
319 },
320 },
321 .ctl = {
322 .count = 5,
323 .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
324 .flush_hw_mask = 0xf0ffffff,
325 },
326 .pipe_vig = {
327 .count = 4,
328 .base = { 0x05000, 0x07000, 0x09000, 0x0b000 },
329 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
330 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
331 MDP_PIPE_CAP_DECIMATION,
332 },
333 .pipe_rgb = {
334 .count = 4,
335 .base = { 0x15000, 0x17000, 0x19000, 0x1b000 },
336 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
337 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
338 },
339 .pipe_dma = {
340 .count = 2,
341 .base = { 0x25000, 0x27000 },
342 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
343 },
344 .lm = {
345 .count = 6,
346 .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 },
347 .nb_stages = 8,
348 .max_width = 2048,
349 .max_height = 0xFFFF,
350 },
351 .dspp = {
352 .count = 4,
353 .base = { 0x55000, 0x57000, 0x59000, 0x5b000 },
354
355 },
356 .ad = {
357 .count = 3,
358 .base = { 0x79000, 0x79800, 0x7a000 },
359 },
360 .pp = {
361 .count = 4,
362 .base = { 0x71000, 0x71800, 0x72000, 0x72800 },
363 },
364 .intf = {
365 .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 },
366 .connect = {
367 [0] = INTF_DISABLED,
368 [1] = INTF_DSI,
369 [2] = INTF_DSI,
370 [3] = INTF_HDMI,
371 },
372 },
373 .max_clk = 320000000,
374};
375
206static const struct mdp5_cfg_handler cfg_handlers[] = { 376static const struct mdp5_cfg_handler cfg_handlers[] = {
207 { .revision = 0, .config = { .hw = &msm8x74_config } }, 377 { .revision = 0, .config = { .hw = &msm8x74v1_config } },
208 { .revision = 2, .config = { .hw = &msm8x74_config } }, 378 { .revision = 2, .config = { .hw = &msm8x74v2_config } },
209 { .revision = 3, .config = { .hw = &apq8084_config } }, 379 { .revision = 3, .config = { .hw = &apq8084_config } },
210 { .revision = 6, .config = { .hw = &msm8x16_config } }, 380 { .revision = 6, .config = { .hw = &msm8x16_config } },
381 { .revision = 9, .config = { .hw = &msm8x94_config } },
211}; 382};
212 383
213
214static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev); 384static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
215 385
216const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler) 386const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
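
Review note: the extended cfg_handlers[] table is how the driver picks a hardware description: the probed major hw revision is matched against the table entries (the matching code lives outside this hunk). A self-contained sketch of that lookup, with the handler entries reduced to name strings:

#include <stddef.h>
#include <stdio.h>

struct cfg_handler { int revision; const char *name; };

static const struct cfg_handler handlers[] = {
	{ 0, "msm8x74v1" }, { 2, "msm8x74v2" }, { 3, "apq8084" },
	{ 6, "msm8x16" },   { 9, "msm8x94" },
};

static const char *hw_config_for(int rev)
{
	for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		if (handlers[i].revision == rev)
			return handlers[i].name;
	return NULL;                     /* unknown major revision */
}

int main(void)
{
	const char *cfg = hw_config_for(9);
	printf("rev 9 -> %s\n", cfg ? cfg : "(unsupported)");
	cfg = hw_config_for(5);
	printf("rev 5 -> %s\n", cfg ? cfg : "(unsupported)");
	return 0;
}
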
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index 69349abe59f2..efb918d9f68b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -42,6 +42,13 @@ struct mdp5_sub_block {
42struct mdp5_lm_block { 42struct mdp5_lm_block {
43 MDP5_SUB_BLOCK_DEFINITION; 43 MDP5_SUB_BLOCK_DEFINITION;
44 uint32_t nb_stages; /* number of stages per blender */ 44 uint32_t nb_stages; /* number of stages per blender */
45 uint32_t max_width; /* Maximum output resolution */
46 uint32_t max_height;
47};
48
49struct mdp5_pipe_block {
50 MDP5_SUB_BLOCK_DEFINITION;
51 uint32_t caps; /* pipe capabilities */
45}; 52};
46 53
47struct mdp5_ctl_block { 54struct mdp5_ctl_block {
@@ -70,9 +77,9 @@ struct mdp5_cfg_hw {
70 struct mdp5_sub_block mdp; 77 struct mdp5_sub_block mdp;
71 struct mdp5_smp_block smp; 78 struct mdp5_smp_block smp;
72 struct mdp5_ctl_block ctl; 79 struct mdp5_ctl_block ctl;
73 struct mdp5_sub_block pipe_vig; 80 struct mdp5_pipe_block pipe_vig;
74 struct mdp5_sub_block pipe_rgb; 81 struct mdp5_pipe_block pipe_rgb;
75 struct mdp5_sub_block pipe_dma; 82 struct mdp5_pipe_block pipe_dma;
76 struct mdp5_lm_block lm; 83 struct mdp5_lm_block lm;
77 struct mdp5_sub_block dspp; 84 struct mdp5_sub_block dspp;
78 struct mdp5_sub_block ad; 85 struct mdp5_sub_block ad;
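
Review note: splitting the pipe entries out into mdp5_pipe_block adds a caps bitmask, so users of a pipe can test for features (flip, scale, CSC, decimation) instead of inferring them from the pipe type. A small sketch of that kind of check, using stand-in bit values rather than the kernel's MDP_PIPE_CAP_* constants:

#include <stdint.h>
#include <stdio.h>

#define CAP_HFLIP (1u << 0)   /* stand-ins; not the kernel's values */
#define CAP_VFLIP (1u << 1)
#define CAP_SCALE (1u << 2)
#define CAP_CSC   (1u << 3)

struct pipe_block { uint32_t caps; };

static int pipe_supports(const struct pipe_block *pb, uint32_t need)
{
	return (pb->caps & need) == need;    /* all requested caps present? */
}

int main(void)
{
	struct pipe_block rgb = { .caps = CAP_HFLIP | CAP_VFLIP | CAP_SCALE };

	printf("scale+hflip: %d\n", pipe_supports(&rgb, CAP_SCALE | CAP_HFLIP));
	printf("csc:         %d\n", pipe_supports(&rgb, CAP_CSC));
	return 0;
}
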
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index ee31b16fe7ea..8e6c9b598a57 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -21,6 +21,8 @@ struct mdp5_cmd_encoder {
21 struct mdp5_interface intf; 21 struct mdp5_interface intf;
22 bool enabled; 22 bool enabled;
23 uint32_t bsc; 23 uint32_t bsc;
24
25 struct mdp5_ctl *ctl;
24}; 26};
25#define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base) 27#define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base)
26 28
@@ -210,13 +212,14 @@ static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
210 mode->vsync_end, mode->vtotal, 212 mode->vsync_end, mode->vtotal,
211 mode->type, mode->flags); 213 mode->type, mode->flags);
212 pingpong_tearcheck_setup(encoder, mode); 214 pingpong_tearcheck_setup(encoder, mode);
213 mdp5_crtc_set_intf(encoder->crtc, &mdp5_cmd_enc->intf); 215 mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf,
216 mdp5_cmd_enc->ctl);
214} 217}
215 218
216static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) 219static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
217{ 220{
218 struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); 221 struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
219 struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); 222 struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
220 struct mdp5_interface *intf = &mdp5_cmd_enc->intf; 223 struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
221 224
222 if (WARN_ON(!mdp5_cmd_enc->enabled)) 225 if (WARN_ON(!mdp5_cmd_enc->enabled))
@@ -235,7 +238,7 @@ static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
235static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) 238static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
236{ 239{
237 struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); 240 struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
238 struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); 241 struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
239 struct mdp5_interface *intf = &mdp5_cmd_enc->intf; 242 struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
240 243
241 if (WARN_ON(mdp5_cmd_enc->enabled)) 244 if (WARN_ON(mdp5_cmd_enc->enabled))
@@ -300,7 +303,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
300 303
301/* initialize command mode encoder */ 304/* initialize command mode encoder */
302struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, 305struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
303 struct mdp5_interface *intf) 306 struct mdp5_interface *intf, struct mdp5_ctl *ctl)
304{ 307{
305 struct drm_encoder *encoder = NULL; 308 struct drm_encoder *encoder = NULL;
306 struct mdp5_cmd_encoder *mdp5_cmd_enc; 309 struct mdp5_cmd_encoder *mdp5_cmd_enc;
@@ -320,6 +323,7 @@ struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
320 323
321 memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf)); 324 memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf));
322 encoder = &mdp5_cmd_enc->base; 325 encoder = &mdp5_cmd_enc->base;
326 mdp5_cmd_enc->ctl = ctl;
323 327
324 drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs, 328 drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
325 DRM_MODE_ENCODER_DSI); 329 DRM_MODE_ENCODER_DSI);
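
Review note: the command-mode encoder now receives its CTL once, at init time, instead of fetching it from the CRTC on every enable/disable; that is what makes mdp5_crtc_get_ctl() removable later in this series. A toy model of the changed ownership:

#include <stdio.h>

struct ctl { int id; };
struct cmd_encoder { struct ctl *ctl; };     /* bound once at init */

static struct cmd_encoder encoder_init(struct ctl *ctl)
{
	struct cmd_encoder enc = { .ctl = ctl };
	return enc;
}

int main(void)
{
	struct ctl c = { .id = 2 };
	struct cmd_encoder enc = encoder_init(&c);

	/* enable/disable paths read enc.ctl directly, no CRTC round trip */
	printf("encoder drives CTL %d\n", enc.ctl->id);
	return 0;
}
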
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 4c1df4e6e5bc..7f9f4ac88029 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -160,8 +160,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
160 160
161 if (mdp5_crtc->ctl && !crtc->state->enable) { 161 if (mdp5_crtc->ctl && !crtc->state->enable) {
162 /* set STAGE_UNUSED for all layers */ 162 /* set STAGE_UNUSED for all layers */
163 mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000); 163 mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
164 mdp5_ctl_release(mdp5_crtc->ctl);
165 mdp5_crtc->ctl = NULL; 164 mdp5_crtc->ctl = NULL;
166 } 165 }
167} 166}
@@ -196,13 +195,9 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
196/* 195/*
197 * blend_setup() - blend all the planes of a CRTC 196 * blend_setup() - blend all the planes of a CRTC
198 * 197 *
199 * When border is enabled, the border color will ALWAYS be the base layer. 198 * If no base layer is available, border will be enabled as the base layer.
200 * Therefore, the first plane (private RGB pipe) will start at STAGE0. 199 * Otherwise all layers will be blended based on their stage calculated
201 * If disabled, the first plane starts at STAGE_BASE. 200 * in mdp5_crtc_atomic_check.
202 *
203 * Note:
204 * Border is not enabled here because the private plane is exactly
205 * the CRTC resolution.
206 */ 201 */
207static void blend_setup(struct drm_crtc *crtc) 202static void blend_setup(struct drm_crtc *crtc)
208{ 203{
@@ -210,9 +205,14 @@ static void blend_setup(struct drm_crtc *crtc)
210 struct mdp5_kms *mdp5_kms = get_kms(crtc); 205 struct mdp5_kms *mdp5_kms = get_kms(crtc);
211 struct drm_plane *plane; 206 struct drm_plane *plane;
212 const struct mdp5_cfg_hw *hw_cfg; 207 const struct mdp5_cfg_hw *hw_cfg;
213 uint32_t lm = mdp5_crtc->lm, blend_cfg = 0; 208 struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
209 const struct mdp_format *format;
210 uint32_t lm = mdp5_crtc->lm;
211 uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
214 unsigned long flags; 212 unsigned long flags;
215#define blender(stage) ((stage) - STAGE_BASE) 213 uint8_t stage[STAGE_MAX + 1];
214 int i, plane_cnt = 0;
215#define blender(stage) ((stage) - STAGE0)
216 216
217 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 217 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
218 218
@@ -222,33 +222,73 @@ static void blend_setup(struct drm_crtc *crtc)
222 if (!mdp5_crtc->ctl) 222 if (!mdp5_crtc->ctl)
223 goto out; 223 goto out;
224 224
225 /* Collect all plane information */
225 drm_atomic_crtc_for_each_plane(plane, crtc) { 226 drm_atomic_crtc_for_each_plane(plane, crtc) {
226 enum mdp_mixer_stage_id stage = 227 pstate = to_mdp5_plane_state(plane->state);
227 to_mdp5_plane_state(plane->state)->stage; 228 pstates[pstate->stage] = pstate;
229 stage[pstate->stage] = mdp5_plane_pipe(plane);
230 plane_cnt++;
231 }
228 232
229 /* 233 /*
230 * Note: This cannot happen with current implementation but 234 * If there is no base layer, enable border color.
231 * we need to check this condition once z property is added 235 * Although it's not possible in current blend logic,
232 */ 236 * put it here as a reminder.
233 BUG_ON(stage > hw_cfg->lm.nb_stages); 237 */
238 if (!pstates[STAGE_BASE] && plane_cnt) {
239 ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
240 DBG("Border Color is enabled");
241 }
234 242
235 /* LM */ 243 /* The reset for blending */
236 mdp5_write(mdp5_kms, 244 for (i = STAGE0; i <= STAGE_MAX; i++) {
237 REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)), 245 if (!pstates[i])
238 MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | 246 continue;
239 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST)); 247
248 format = to_mdp_format(
249 msm_framebuffer_format(pstates[i]->base.fb));
250 plane = pstates[i]->base.plane;
251 blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
252 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
253 fg_alpha = pstates[i]->alpha;
254 bg_alpha = 0xFF - pstates[i]->alpha;
255 DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
256
257 if (format->alpha_enable && pstates[i]->premultiplied) {
258 blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
259 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
260 if (fg_alpha != 0xff) {
261 bg_alpha = fg_alpha;
262 blend_op |=
263 MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
264 MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
265 } else {
266 blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
267 }
268 } else if (format->alpha_enable) {
269 blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
270 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
271 if (fg_alpha != 0xff) {
272 bg_alpha = fg_alpha;
273 blend_op |=
274 MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
275 MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
276 MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
277 MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
278 } else {
279 blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
280 }
281 }
282
283 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
284 blender(i)), blend_op);
240 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm, 285 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
241 blender(stage)), 0xff); 286 blender(i)), fg_alpha);
242 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm, 287 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
243 blender(stage)), 0x00); 288 blender(i)), bg_alpha);
244 /* CTL */
245 blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
246 DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
247 pipe2name(mdp5_plane_pipe(plane)), stage);
248 } 289 }
249 290
250 DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg); 291 mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);
251 mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
252 292
253out: 293out:
254 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); 294 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
@@ -339,25 +379,19 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
339 struct mdp5_kms *mdp5_kms = get_kms(crtc); 379 struct mdp5_kms *mdp5_kms = get_kms(crtc);
340 struct drm_plane *plane; 380 struct drm_plane *plane;
341 struct drm_device *dev = crtc->dev; 381 struct drm_device *dev = crtc->dev;
342 struct plane_state pstates[STAGE3 + 1]; 382 struct plane_state pstates[STAGE_MAX + 1];
383 const struct mdp5_cfg_hw *hw_cfg;
343 int cnt = 0, i; 384 int cnt = 0, i;
344 385
345 DBG("%s: check", mdp5_crtc->name); 386 DBG("%s: check", mdp5_crtc->name);
346 387
347 /* request a free CTL, if none is already allocated for this CRTC */
348 if (state->enable && !mdp5_crtc->ctl) {
349 mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
350 if (WARN_ON(!mdp5_crtc->ctl))
351 return -EINVAL;
352 }
353
354 /* verify that there are not too many planes attached to crtc 388 /* verify that there are not too many planes attached to crtc
355 * and that we don't have conflicting mixer stages: 389 * and that we don't have conflicting mixer stages:
356 */ 390 */
391 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
357 drm_atomic_crtc_state_for_each_plane(plane, state) { 392 drm_atomic_crtc_state_for_each_plane(plane, state) {
358 struct drm_plane_state *pstate; 393 struct drm_plane_state *pstate;
359 394 if (cnt >= (hw_cfg->lm.nb_stages)) {
360 if (cnt >= ARRAY_SIZE(pstates)) {
361 dev_err(dev->dev, "too many planes!\n"); 395 dev_err(dev->dev, "too many planes!\n");
362 return -EINVAL; 396 return -EINVAL;
363 } 397 }
@@ -369,13 +403,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
369 */ 403 */
370 if (!pstate) 404 if (!pstate)
371 pstate = plane->state; 405 pstate = plane->state;
372
373 pstates[cnt].plane = plane; 406 pstates[cnt].plane = plane;
374 pstates[cnt].state = to_mdp5_plane_state(pstate); 407 pstates[cnt].state = to_mdp5_plane_state(pstate);
375 408
376 cnt++; 409 cnt++;
377 } 410 }
378 411
412 /* assign a stage based on sorted zpos property */
379 sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); 413 sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
380 414
381 for (i = 0; i < cnt; i++) { 415 for (i = 0; i < cnt; i++) {
@@ -693,8 +727,8 @@ void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
693 complete_flip(crtc, file); 727 complete_flip(crtc, file);
694} 728}
695 729
696/* set interface for routing crtc->encoder: */ 730void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
697void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf) 731 struct mdp5_interface *intf, struct mdp5_ctl *ctl)
698{ 732{
699 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 733 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
700 struct mdp5_kms *mdp5_kms = get_kms(crtc); 734 struct mdp5_kms *mdp5_kms = get_kms(crtc);
@@ -717,7 +751,8 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
717 751
718 mdp_irq_update(&mdp5_kms->base); 752 mdp_irq_update(&mdp5_kms->base);
719 753
720 mdp5_ctl_set_intf(mdp5_crtc->ctl, intf); 754 mdp5_crtc->ctl = ctl;
755 mdp5_ctl_set_pipeline(ctl, intf, lm);
721} 756}
722 757
723int mdp5_crtc_get_lm(struct drm_crtc *crtc) 758int mdp5_crtc_get_lm(struct drm_crtc *crtc)
@@ -726,12 +761,6 @@ int mdp5_crtc_get_lm(struct drm_crtc *crtc)
726 return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm; 761 return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
727} 762}
728 763
729struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
730{
731 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
732 return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
733}
734
735void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc) 764void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
736{ 765{
737 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 766 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -776,7 +805,5 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
776 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); 805 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
777 plane->crtc = crtc; 806 plane->crtc = crtc;
778 807
779 mdp5_plane_install_properties(plane, &crtc->base);
780
781 return crtc; 808 return crtc;
782} 809}
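
Review note: the rewritten blend_setup() chooses per-stage alpha sources from the framebuffer format and the plane's premultiplied flag. A userspace model of that decision tree follows; it mirrors the three cases in the hunk (constant alpha, premultiplied, straight pixel alpha) with invented field names instead of the MDP5_LM_BLEND_OP_MODE_* register macros, and omits the fg_alpha != 0xff modulation sub-cases for brevity:

#include <stdint.h>
#include <stdio.h>

enum alpha_src { ALPHA_CONST, ALPHA_PIXEL };

struct blend_pick {
	enum alpha_src fg_src, bg_src;
	uint32_t fg_alpha, bg_alpha;
	int bg_inverted;                 /* bg weighted by (1 - fg alpha) */
};

static struct blend_pick pick_blend(int fmt_has_alpha, int premultiplied,
				    uint32_t plane_alpha)
{
	struct blend_pick b = {
		.fg_src = ALPHA_CONST, .bg_src = ALPHA_CONST,
		.fg_alpha = plane_alpha, .bg_alpha = 0xff - plane_alpha,
	};

	if (fmt_has_alpha && premultiplied) {
		b.bg_src = ALPHA_PIXEL;      /* fg already scaled by its alpha */
		b.bg_inverted = 1;
	} else if (fmt_has_alpha) {
		b.fg_src = ALPHA_PIXEL;      /* both sides keyed off fg pixel alpha */
		b.bg_src = ALPHA_PIXEL;
		b.bg_inverted = 1;
	}
	return b;
}

int main(void)
{
	struct blend_pick b = pick_blend(1, 0, 0xff);
	printf("fg_src=%d bg_src=%d bg_inverted=%d\n",
	       b.fg_src, b.bg_src, b.bg_inverted);
	return 0;
}
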
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index f2530f224a76..4e81ca4f964a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -17,7 +17,7 @@
17/* 17/*
18 * CTL - MDP Control Pool Manager 18 * CTL - MDP Control Pool Manager
19 * 19 *
20 * Controls are shared between all CRTCs. 20 * Controls are shared between all display interfaces.
21 * 21 *
22 * They are intended to be used for data path configuration. 22 * They are intended to be used for data path configuration.
23 * The top level register programming describes the complete data path for 23 * The top level register programming describes the complete data path for
@@ -27,12 +27,11 @@
27 * 27 *
28 * In certain use cases (high-resolution dual pipe), one single CTL can be 28 * In certain use cases (high-resolution dual pipe), one single CTL can be
29 * shared across multiple CRTCs. 29 * shared across multiple CRTCs.
30 *
31 * Because the number of CTLs can be less than the number of CRTCs,
32 * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is
33 * requested by the client (in mdp5_crtc_mode_set()).
34 */ 30 */
35 31
32#define CTL_STAT_BUSY 0x1
33#define CTL_STAT_BOOKED 0x2
34
36struct op_mode { 35struct op_mode {
37 struct mdp5_interface intf; 36 struct mdp5_interface intf;
38 37
@@ -46,8 +45,8 @@ struct mdp5_ctl {
46 u32 id; 45 u32 id;
47 int lm; 46 int lm;
48 47
49 /* whether this CTL has been allocated or not: */ 48 /* CTL status bitmask */
50 bool busy; 49 u32 status;
51 50
52 /* Operation Mode Configuration for the Pipeline */ 51 /* Operation Mode Configuration for the Pipeline */
53 struct op_mode pipeline; 52 struct op_mode pipeline;
@@ -61,7 +60,10 @@ struct mdp5_ctl {
61 60
62 bool cursor_on; 61 bool cursor_on;
63 62
64 struct drm_crtc *crtc; 63 /* True if the current CTL has FLUSH bits pending for single FLUSH. */
64 bool flush_pending;
65
66 struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
65}; 67};
66 68
67struct mdp5_ctl_manager { 69struct mdp5_ctl_manager {
@@ -74,6 +76,10 @@ struct mdp5_ctl_manager {
74 /* to filter out non-present bits in the current hardware config */ 76 /* to filter out non-present bits in the current hardware config */
75 u32 flush_hw_mask; 77 u32 flush_hw_mask;
76 78
79 /* status for single FLUSH */
80 bool single_flush_supported;
81 u32 single_flush_pending_mask;
82
77 /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */ 83 /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
78 spinlock_t pool_lock; 84 spinlock_t pool_lock;
79 struct mdp5_ctl ctls[MAX_CTL]; 85 struct mdp5_ctl ctls[MAX_CTL];
@@ -168,11 +174,21 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
168 spin_unlock_irqrestore(&ctl->hw_lock, flags); 174 spin_unlock_irqrestore(&ctl->hw_lock, flags);
169} 175}
170 176
171int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf) 177int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
178 struct mdp5_interface *intf, int lm)
172{ 179{
173 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; 180 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
174 struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); 181 struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
175 182
183 if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
184 dev_err(mdp5_kms->dev->dev,
185 "CTL %d is allocated by INTF %d, but used by INTF %d\n",
186 ctl->id, ctl->pipeline.intf.num, intf->num);
187 return -EINVAL;
188 }
189
190 ctl->lm = lm;
191
176 memcpy(&ctl->pipeline.intf, intf, sizeof(*intf)); 192 memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
177 193
178 ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) | 194 ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
@@ -287,29 +303,85 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
287 blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT; 303 blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
288 304
289 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg); 305 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
306 ctl->cursor_on = enable;
290 307
291 spin_unlock_irqrestore(&ctl->hw_lock, flags); 308 spin_unlock_irqrestore(&ctl->hw_lock, flags);
292 309
293 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id); 310 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
294 ctl->cursor_on = enable;
295 311
296 return 0; 312 return 0;
297} 313}
298 314
299int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg) 315static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
316 enum mdp_mixer_stage_id stage)
317{
318 switch (pipe) {
319 case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
320 case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
321 case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
322 case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
323 case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
324 case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
325 case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
326 case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
327 case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
328 case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
329 default: return 0;
330 }
331}
332
333static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
334 enum mdp_mixer_stage_id stage)
335{
336 if (stage < STAGE6)
337 return 0;
338
339 switch (pipe) {
340 case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
341 case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
342 case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
343 case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
344 case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
345 case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
346 case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
347 case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
348 case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
349 case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
350 default: return 0;
351 }
352}
353
354int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
355 u32 ctl_blend_op_flags)
300{ 356{
301 unsigned long flags; 357 unsigned long flags;
358 u32 blend_cfg = 0, blend_ext_cfg = 0;
359 int i, start_stage;
360
361 if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
362 start_stage = STAGE0;
363 blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
364 } else {
365 start_stage = STAGE_BASE;
366 }
302 367
368 for (i = start_stage; i < start_stage + stage_cnt; i++) {
369 blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
370 blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
371 }
372
373 spin_lock_irqsave(&ctl->hw_lock, flags);
303 if (ctl->cursor_on) 374 if (ctl->cursor_on)
304 blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; 375 blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
305 else
306 blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
307 376
308 spin_lock_irqsave(&ctl->hw_lock, flags); 377 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
309 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg); 378 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
310 spin_unlock_irqrestore(&ctl->hw_lock, flags); 379 spin_unlock_irqrestore(&ctl->hw_lock, flags);
311 380
312 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm); 381 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);
382
383 DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
384 blend_cfg, blend_ext_cfg);
313 385
314 return 0; 386 return 0;
315} 387}
@@ -379,6 +451,31 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
379 return sw_mask; 451 return sw_mask;
380} 452}
381 453
454static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
455 u32 *flush_id)
456{
457 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
458
459 if (ctl->pair) {
460 DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
461 ctl->flush_pending = true;
462 ctl_mgr->single_flush_pending_mask |= (*flush_mask);
463 *flush_mask = 0;
464
465 if (ctl->pair->flush_pending) {
466 *flush_id = min_t(u32, ctl->id, ctl->pair->id);
467 *flush_mask = ctl_mgr->single_flush_pending_mask;
468
469 ctl->flush_pending = false;
470 ctl->pair->flush_pending = false;
471 ctl_mgr->single_flush_pending_mask = 0;
472
473 DBG("Single FLUSH mask %x,ID %d", *flush_mask,
474 *flush_id);
475 }
476 }
477}
478
382/** 479/**
383 * mdp5_ctl_commit() - Register Flush 480 * mdp5_ctl_commit() - Register Flush
384 * 481 *
@@ -400,6 +497,8 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
400 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; 497 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
401 struct op_mode *pipeline = &ctl->pipeline; 498 struct op_mode *pipeline = &ctl->pipeline;
402 unsigned long flags; 499 unsigned long flags;
500 u32 flush_id = ctl->id;
501 u32 curr_ctl_flush_mask;
403 502
404 pipeline->start_mask &= ~flush_mask; 503 pipeline->start_mask &= ~flush_mask;
405 504
@@ -415,9 +514,13 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
415 514
416 flush_mask &= ctl_mgr->flush_hw_mask; 515 flush_mask &= ctl_mgr->flush_hw_mask;
417 516
517 curr_ctl_flush_mask = flush_mask;
518
519 fix_for_single_flush(ctl, &flush_mask, &flush_id);
520
418 if (flush_mask) { 521 if (flush_mask) {
419 spin_lock_irqsave(&ctl->hw_lock, flags); 522 spin_lock_irqsave(&ctl->hw_lock, flags);
420 ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask); 523 ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
421 spin_unlock_irqrestore(&ctl->hw_lock, flags); 524 spin_unlock_irqrestore(&ctl->hw_lock, flags);
422 } 525 }
423 526
@@ -426,7 +529,7 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
426 refill_start_mask(ctl); 529 refill_start_mask(ctl);
427 } 530 }
428 531
429 return flush_mask; 532 return curr_ctl_flush_mask;
430} 533}
431 534
432u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl) 535u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
@@ -434,59 +537,85 @@ u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
434 return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id)); 537 return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
435} 538}
436 539
437void mdp5_ctl_release(struct mdp5_ctl *ctl) 540int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
438{ 541{
439 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; 542 return WARN_ON(!ctl) ? -EINVAL : ctl->id;
440 unsigned long flags; 543}
441 544
442 if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) { 545/*
443 dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)", 546 * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
444 ctl->id, ctl->busy); 547 */
445 return; 548int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
549{
550 struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
551 struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
552
553 /* do nothing silently if the hw doesn't support it */
554 if (!ctl_mgr->single_flush_supported)
555 return 0;
556
557 if (!enable) {
558 ctlx->pair = NULL;
559 ctly->pair = NULL;
560 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0);
561 return 0;
562 } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
563 dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
564 return -EINVAL;
565 } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
566 dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
567 return -EINVAL;
446 } 568 }
447 569
448 spin_lock_irqsave(&ctl_mgr->pool_lock, flags); 570 ctlx->pair = ctly;
449 ctl->busy = false; 571 ctly->pair = ctlx;
450 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
451 572
452 DBG("CTL %d released", ctl->id); 573 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
453} 574 MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
454 575
455int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl) 576 return 0;
456{
457 return WARN_ON(!ctl) ? -EINVAL : ctl->id;
458} 577}
459 578
460/* 579/*
461 * mdp5_ctl_request() - CTL dynamic allocation 580 * mdp5_ctlm_request() - CTL allocation
462 * 581 *
463 * Note: Current implementation considers that we can only have one CRTC per CTL 582 * Try to return a booked CTL when @intf_num is 1 or 2, an unbooked one for other INTFs.
583 * If no CTL is available in the preferred category, allocate from the other one.
464 * 584 *
465 * @return first free CTL 585 * @return NULL if no CTL is available.
466 */ 586 */
467struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr, 587struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
468 struct drm_crtc *crtc) 588 int intf_num)
469{ 589{
470 struct mdp5_ctl *ctl = NULL; 590 struct mdp5_ctl *ctl = NULL;
591 const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
592 u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
471 unsigned long flags; 593 unsigned long flags;
472 int c; 594 int c;
473 595
474 spin_lock_irqsave(&ctl_mgr->pool_lock, flags); 596 spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
475 597
598 /* search the preferred */
476 for (c = 0; c < ctl_mgr->nctl; c++) 599 for (c = 0; c < ctl_mgr->nctl; c++)
477 if (!ctl_mgr->ctls[c].busy) 600 if ((ctl_mgr->ctls[c].status & checkm) == match)
478 break; 601 goto found;
479 602
480 if (unlikely(c >= ctl_mgr->nctl)) { 603 dev_warn(ctl_mgr->dev->dev,
481 dev_err(ctl_mgr->dev->dev, "No more CTL available!"); 604 "fall back to the other CTL category for INTF %d!\n", intf_num);
482 goto unlock;
483 }
484 605
485 ctl = &ctl_mgr->ctls[c]; 606 match ^= CTL_STAT_BOOKED;
607 for (c = 0; c < ctl_mgr->nctl; c++)
608 if ((ctl_mgr->ctls[c].status & checkm) == match)
609 goto found;
486 610
487 ctl->lm = mdp5_crtc_get_lm(crtc); 611 dev_err(ctl_mgr->dev->dev, "No more CTL available!");
488 ctl->crtc = crtc; 612 goto unlock;
489 ctl->busy = true; 613
614found:
615 ctl = &ctl_mgr->ctls[c];
616 ctl->pipeline.intf.num = intf_num;
617 ctl->lm = -1;
618 ctl->status |= CTL_STAT_BUSY;
490 ctl->pending_ctl_trigger = 0; 619 ctl->pending_ctl_trigger = 0;
491 DBG("CTL %d allocated", ctl->id); 620 DBG("CTL %d allocated", ctl->id);
492 621
@@ -515,9 +644,11 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
515} 644}
516 645
517struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, 646struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
518 void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg) 647 void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
519{ 648{
520 struct mdp5_ctl_manager *ctl_mgr; 649 struct mdp5_ctl_manager *ctl_mgr;
650 const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
651 int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
521 const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl; 652 const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
522 unsigned long flags; 653 unsigned long flags;
523 int c, ret; 654 int c, ret;
@@ -551,14 +682,28 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
551 if (WARN_ON(!ctl_cfg->base[c])) { 682 if (WARN_ON(!ctl_cfg->base[c])) {
552 dev_err(dev->dev, "CTL_%d: base is null!\n", c); 683 dev_err(dev->dev, "CTL_%d: base is null!\n", c);
553 ret = -EINVAL; 684 ret = -EINVAL;
685 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
554 goto fail; 686 goto fail;
555 } 687 }
556 ctl->ctlm = ctl_mgr; 688 ctl->ctlm = ctl_mgr;
557 ctl->id = c; 689 ctl->id = c;
558 ctl->reg_offset = ctl_cfg->base[c]; 690 ctl->reg_offset = ctl_cfg->base[c];
559 ctl->busy = false; 691 ctl->status = 0;
560 spin_lock_init(&ctl->hw_lock); 692 spin_lock_init(&ctl->hw_lock);
561 } 693 }
694
695 /*
696 * In the dual DSI case, CTL0 and CTL1 are always assigned to the two DSI
697 * interfaces to support the single FLUSH feature (both CTLs are flushed
698 * by a single write to CTL0's FLUSH register), keeping the two DSI pipes in sync.
699 * Single FLUSH is supported from hw rev v3.0.
700 */
701 if (rev >= 3) {
702 ctl_mgr->single_flush_supported = true;
703 /* Reserve CTL0/1 for INTF1/2 */
704 ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
705 ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
706 }
562 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); 707 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
563 DBG("Pool of %d CTLs created.", ctl_mgr->nctl); 708 DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
564 709
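
Review note: fix_for_single_flush() is the core of the single-FLUSH feature: a paired CTL parks its flush bits in the manager, and only when its partner has also committed does one combined write go out, addressed to the lower-numbered CTL. A self-contained toy model of that handshake (locking and register I/O stripped out):

#include <stdio.h>

struct ctl { int id; int flush_pending; struct ctl *pair; };
static unsigned single_flush_pending_mask;

/* Returns nonzero when a hw FLUSH write should happen now. */
static int commit(struct ctl *c, unsigned *mask, int *flush_id)
{
	*flush_id = c->id;
	if (!c->pair)
		return 1;                        /* unpaired: flush immediately */

	single_flush_pending_mask |= *mask;      /* park this CTL's bits */
	c->flush_pending = 1;
	*mask = 0;

	if (c->pair->flush_pending) {            /* partner already committed */
		*flush_id = c->id < c->pair->id ? c->id : c->pair->id;
		*mask = single_flush_pending_mask;
		c->flush_pending = c->pair->flush_pending = 0;
		single_flush_pending_mask = 0;
		return 1;
	}
	return 0;                                /* wait for the pair */
}

int main(void)
{
	struct ctl c0 = { .id = 0 }, c1 = { .id = 1 };
	unsigned mask = 0x11;
	int flush_id;

	c0.pair = &c1;
	c1.pair = &c0;

	printf("CTL0 commit -> flush now? %d\n", commit(&c0, &mask, &flush_id));
	mask = 0x22;
	printf("CTL1 commit -> flush now? %d, mask 0x%x via CTL%d\n",
	       commit(&c1, &mask, &flush_id), mask, flush_id);
	return 0;
}
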
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index 4678228c4f14..96148c6f863c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -23,7 +23,7 @@
23 */ 23 */
24struct mdp5_ctl_manager; 24struct mdp5_ctl_manager;
25struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, 25struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
26 void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg); 26 void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd);
27void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm); 27void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
28void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm); 28void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
29 29
@@ -32,49 +32,32 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
32 * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler, 32 * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
33 * which is then used to call the other mdp5_ctl_*(ctl, ...) functions. 33 * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
34 */ 34 */
35struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc); 35struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
36
36int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl); 37int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
37 38
38struct mdp5_interface; 39struct mdp5_interface;
39int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf); 40int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
41 int lm);
40int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled); 42int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
41 43
42int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable); 44int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
43 45int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
44/*
45 * blend_cfg (LM blender config):
46 *
47 * The function below allows the caller of mdp5_ctl_blend() to specify how pipes
48 * are being blended according to their stage (z-order), through @blend_cfg arg.
49 */
50static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
51 enum mdp_mixer_stage_id stage)
52{
53 switch (pipe) {
54 case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
55 case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
56 case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
57 case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
58 case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
59 case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
60 case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
61 case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
62 case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
63 case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
64 default: return 0;
65 }
66}
67 46
68/* 47/*
69 * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM) 48 * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
70 * 49 *
71 * @blend_cfg: see LM blender config definition below 50 * @stage: array to contain the pipe num for each stage
51 * @stage_cnt: valid stage number in stage array
52 * @ctl_blend_op_flags: blender operation mode flags
72 * 53 *
73 * Note: 54 * Note:
74 * CTL registers need to be flushed after calling this function 55 * CTL registers need to be flushed after calling this function
75 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) 56 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
76 */ 57 */
77int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg); 58#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0)
59int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
60 u32 ctl_blend_op_flags);
78 61
79/** 62/**
80 * mdp_ctl_flush_mask...() - Register FLUSH masks 63 * mdp_ctl_flush_mask...() - Register FLUSH masks
@@ -91,8 +74,6 @@ u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
91u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask); 74u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
92u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl); 75u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
93 76
94void mdp5_ctl_release(struct mdp5_ctl *ctl);
95
96 77
97 78
98#endif /* __MDP5_CTL_H__ */ 79#endif /* __MDP5_CTL_H__ */
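
Review note: the header change above is the API pivot: callers of mdp5_ctl_blend() now hand over a stage-indexed pipe array, a count, and a flags word, and the CTL code derives the LAYER/LAYER_EXT register values itself. A hypothetical caller under that contract, with made-up pipe ids and a local stand-in for the border flag:

#include <stdint.h>
#include <stdio.h>

#define STAGE_MAX 7
#define BLEND_FLAG_BORDER_OUT (1u << 0)  /* stand-in for MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT */

static void ctl_blend(const uint8_t *stage, uint32_t stage_cnt, uint32_t flags)
{
	for (uint32_t i = 0; i < stage_cnt; i++)
		printf("stage %u -> pipe %u\n", i, stage[i]);
	printf("border color: %s\n",
	       (flags & BLEND_FLAG_BORDER_OUT) ? "on" : "off");
}

int main(void)
{
	uint8_t stage[STAGE_MAX + 1] = { 3, 1 };  /* hypothetical pipe ids by z-order */

	ctl_blend(stage, 2, BLEND_FLAG_BORDER_OUT);
	return 0;
}
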
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index de97c08f3f1f..c9e32b08a7a0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -27,6 +27,8 @@ struct mdp5_encoder {
27 spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ 27 spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
28 bool enabled; 28 bool enabled;
29 uint32_t bsc; 29 uint32_t bsc;
30
31 struct mdp5_ctl *ctl;
30}; 32};
31#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base) 33#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
32 34
@@ -222,14 +224,15 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
222 224
223 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); 225 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
224 226
225 mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf); 227 mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf,
228 mdp5_encoder->ctl);
226} 229}
227 230
228static void mdp5_encoder_disable(struct drm_encoder *encoder) 231static void mdp5_encoder_disable(struct drm_encoder *encoder)
229{ 232{
230 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); 233 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
231 struct mdp5_kms *mdp5_kms = get_kms(encoder); 234 struct mdp5_kms *mdp5_kms = get_kms(encoder);
232 struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); 235 struct mdp5_ctl *ctl = mdp5_encoder->ctl;
233 int lm = mdp5_crtc_get_lm(encoder->crtc); 236 int lm = mdp5_crtc_get_lm(encoder->crtc);
234 struct mdp5_interface *intf = &mdp5_encoder->intf; 237 struct mdp5_interface *intf = &mdp5_encoder->intf;
235 int intfn = mdp5_encoder->intf.num; 238 int intfn = mdp5_encoder->intf.num;
@@ -264,7 +267,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
264{ 267{
265 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); 268 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
266 struct mdp5_kms *mdp5_kms = get_kms(encoder); 269 struct mdp5_kms *mdp5_kms = get_kms(encoder);
267 struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); 270 struct mdp5_ctl *ctl = mdp5_encoder->ctl;
268 struct mdp5_interface *intf = &mdp5_encoder->intf; 271 struct mdp5_interface *intf = &mdp5_encoder->intf;
269 int intfn = mdp5_encoder->intf.num; 272 int intfn = mdp5_encoder->intf.num;
270 unsigned long flags; 273 unsigned long flags;
@@ -294,6 +297,7 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
294 struct drm_encoder *slave_encoder) 297 struct drm_encoder *slave_encoder)
295{ 298{
296 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); 299 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
300 struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
297 struct mdp5_kms *mdp5_kms; 301 struct mdp5_kms *mdp5_kms;
298 int intf_num; 302 int intf_num;
299 u32 data = 0; 303 u32 data = 0;
@@ -316,12 +320,13 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
316 320
317 /* Make sure clocks are on when connectors calling this function. */ 321 /* Make sure clocks are on when connectors calling this function. */
318 mdp5_enable(mdp5_kms); 322 mdp5_enable(mdp5_kms);
319 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
320 MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
321 /* Dumb Panel, Sync mode */ 323 /* Dumb Panel, Sync mode */
322 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0); 324 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0);
323 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data); 325 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data);
324 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1); 326 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
327
328 mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
329
325 mdp5_disable(mdp5_kms); 330 mdp5_disable(mdp5_kms);
326 331
327 return 0; 332 return 0;
@@ -329,7 +334,7 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
329 334
330/* initialize encoder */ 335/* initialize encoder */
331struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, 336struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
332 struct mdp5_interface *intf) 337 struct mdp5_interface *intf, struct mdp5_ctl *ctl)
333{ 338{
334 struct drm_encoder *encoder = NULL; 339 struct drm_encoder *encoder = NULL;
335 struct mdp5_encoder *mdp5_encoder; 340 struct mdp5_encoder *mdp5_encoder;
@@ -345,6 +350,7 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
345 350
346 memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf)); 351 memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
347 encoder = &mdp5_encoder->base; 352 encoder = &mdp5_encoder->base;
353 mdp5_encoder->ctl = ctl;
348 354
349 spin_lock_init(&mdp5_encoder->intf_lock); 355 spin_lock_init(&mdp5_encoder->intf_lock);
350 356
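
Review note: mdp5_encoder_set_split_display() no longer writes the SPARE_0 single-flush enable itself; it pairs the master and slave encoders' CTLs and lets the CTL code own that register. A stub of the pairing contract, mirroring the error cases of mdp5_ctl_pair() in the ctl hunk above:

#include <stdbool.h>
#include <stdio.h>

struct ctl { struct ctl *pair; unsigned booked; };

static int ctl_pair(struct ctl *x, struct ctl *y, bool enable)
{
	if (!enable) {
		x->pair = y->pair = NULL;    /* driver also clears SPARE_0 here */
		return 0;
	}
	if (x->pair || y->pair)
		return -1;                   /* already paired */
	if (!(x->booked && y->booked))
		return -1;                   /* only the booked CTL0/1 may pair */

	x->pair = y;
	y->pair = x;                         /* driver also sets SPARE_0 here */
	return 0;
}

int main(void)
{
	struct ctl a = { .booked = 1 }, b = { .booked = 1 };

	printf("pair:       %d\n", ctl_pair(&a, &b, true));
	printf("pair again: %d\n", ctl_pair(&a, &b, true));  /* rejected */
	return 0;
}
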
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 33bd4c6160dd..b1f73bee1368 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -21,8 +21,11 @@
21#include "msm_drv.h" 21#include "msm_drv.h"
22#include "mdp5_kms.h" 22#include "mdp5_kms.h"
23 23
24void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask) 24void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
25 uint32_t old_irqmask)
25{ 26{
27 mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0),
28 irqmask ^ (irqmask & old_irqmask));
26 mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask); 29 mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask);
27} 30}
28 31
@@ -71,9 +74,10 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
71 struct drm_device *dev = mdp5_kms->dev; 74 struct drm_device *dev = mdp5_kms->dev;
72 struct msm_drm_private *priv = dev->dev_private; 75 struct msm_drm_private *priv = dev->dev_private;
73 unsigned int id; 76 unsigned int id;
74 uint32_t status; 77 uint32_t status, enable;
75 78
76 status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)); 79 enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0));
80 status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable;
77 mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status); 81 mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);
78 82
79 VERB("status=%08x", status); 83 VERB("status=%08x", status);
@@ -112,15 +116,24 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
112 116
113int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) 117int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
114{ 118{
119 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
120
121 mdp5_enable(mdp5_kms);
115 mdp_update_vblank_mask(to_mdp_kms(kms), 122 mdp_update_vblank_mask(to_mdp_kms(kms),
116 mdp5_crtc_vblank(crtc), true); 123 mdp5_crtc_vblank(crtc), true);
124 mdp5_disable(mdp5_kms);
125
117 return 0; 126 return 0;
118} 127}
119 128
120void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) 129void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
121{ 130{
131 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
132
133 mdp5_enable(mdp5_kms);
122 mdp_update_vblank_mask(to_mdp_kms(kms), 134 mdp_update_vblank_mask(to_mdp_kms(kms),
123 mdp5_crtc_vblank(crtc), false); 135 mdp5_crtc_vblank(crtc), false);
136 mdp5_disable(mdp5_kms);
124} 137}
125 138
126/* 139/*
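
Review note: the reworked mdp5_set_irqmask() clears stale INTR status, but only for the interrupts this call is turning on; irqmask ^ (irqmask & old_irqmask) is simply irqmask & ~old_irqmask. A two-line demonstration of the identity:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t old_mask = 0x0000000f, new_mask = 0x000000f3;
	uint32_t newly_on = new_mask ^ (new_mask & old_mask);

	/* Both expressions pick the bits set in new_mask but not old_mask. */
	printf("clear 0x%08x (== 0x%08x)\n", newly_on, new_mask & ~old_mask);
	return 0;
}
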
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index e253db5de5aa..047cb0433ccb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -177,7 +177,8 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms)
177 clk_disable_unprepare(mdp5_kms->ahb_clk); 177 clk_disable_unprepare(mdp5_kms->ahb_clk);
178 clk_disable_unprepare(mdp5_kms->axi_clk); 178 clk_disable_unprepare(mdp5_kms->axi_clk);
179 clk_disable_unprepare(mdp5_kms->core_clk); 179 clk_disable_unprepare(mdp5_kms->core_clk);
180 clk_disable_unprepare(mdp5_kms->lut_clk); 180 if (mdp5_kms->lut_clk)
181 clk_disable_unprepare(mdp5_kms->lut_clk);
181 182
182 return 0; 183 return 0;
183} 184}
@@ -189,14 +190,15 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
189 clk_prepare_enable(mdp5_kms->ahb_clk); 190 clk_prepare_enable(mdp5_kms->ahb_clk);
190 clk_prepare_enable(mdp5_kms->axi_clk); 191 clk_prepare_enable(mdp5_kms->axi_clk);
191 clk_prepare_enable(mdp5_kms->core_clk); 192 clk_prepare_enable(mdp5_kms->core_clk);
192 clk_prepare_enable(mdp5_kms->lut_clk); 193 if (mdp5_kms->lut_clk)
194 clk_prepare_enable(mdp5_kms->lut_clk);
193 195
194 return 0; 196 return 0;
195} 197}
196 198
197static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, 199static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
198 enum mdp5_intf_type intf_type, int intf_num, 200 enum mdp5_intf_type intf_type, int intf_num,
199 enum mdp5_intf_mode intf_mode) 201 enum mdp5_intf_mode intf_mode, struct mdp5_ctl *ctl)
200{ 202{
201 struct drm_device *dev = mdp5_kms->dev; 203 struct drm_device *dev = mdp5_kms->dev;
202 struct msm_drm_private *priv = dev->dev_private; 204 struct msm_drm_private *priv = dev->dev_private;
@@ -209,9 +211,9 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
209 211
210 if ((intf_type == INTF_DSI) && 212 if ((intf_type == INTF_DSI) &&
211 (intf_mode == MDP5_INTF_DSI_MODE_COMMAND)) 213 (intf_mode == MDP5_INTF_DSI_MODE_COMMAND))
212 encoder = mdp5_cmd_encoder_init(dev, &intf); 214 encoder = mdp5_cmd_encoder_init(dev, &intf, ctl);
213 else 215 else
214 encoder = mdp5_encoder_init(dev, &intf); 216 encoder = mdp5_encoder_init(dev, &intf, ctl);
215 217
216 if (IS_ERR(encoder)) { 218 if (IS_ERR(encoder)) {
217 dev_err(dev->dev, "failed to construct encoder\n"); 219 dev_err(dev->dev, "failed to construct encoder\n");
@@ -249,6 +251,8 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
249 const struct mdp5_cfg_hw *hw_cfg = 251 const struct mdp5_cfg_hw *hw_cfg =
250 mdp5_cfg_get_hw_config(mdp5_kms->cfg); 252 mdp5_cfg_get_hw_config(mdp5_kms->cfg);
251 enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num]; 253 enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
254 struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
255 struct mdp5_ctl *ctl;
252 struct drm_encoder *encoder; 256 struct drm_encoder *encoder;
253 int ret = 0; 257 int ret = 0;
254 258
@@ -259,8 +263,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
259 if (!priv->edp) 263 if (!priv->edp)
260 break; 264 break;
261 265
266 ctl = mdp5_ctlm_request(ctlm, intf_num);
267 if (!ctl) {
268 ret = -EINVAL;
269 break;
270 }
271
262 encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, 272 encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num,
263 MDP5_INTF_MODE_NONE); 273 MDP5_INTF_MODE_NONE, ctl);
264 if (IS_ERR(encoder)) { 274 if (IS_ERR(encoder)) {
265 ret = PTR_ERR(encoder); 275 ret = PTR_ERR(encoder);
266 break; 276 break;
@@ -272,8 +282,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
272 if (!priv->hdmi) 282 if (!priv->hdmi)
273 break; 283 break;
274 284
285 ctl = mdp5_ctlm_request(ctlm, intf_num);
286 if (!ctl) {
287 ret = -EINVAL;
288 break;
289 }
290
275 encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, 291 encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num,
276 MDP5_INTF_MODE_NONE); 292 MDP5_INTF_MODE_NONE, ctl);
277 if (IS_ERR(encoder)) { 293 if (IS_ERR(encoder)) {
278 ret = PTR_ERR(encoder); 294 ret = PTR_ERR(encoder);
279 break; 295 break;
@@ -298,14 +314,20 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
298 if (!priv->dsi[dsi_id]) 314 if (!priv->dsi[dsi_id])
299 break; 315 break;
300 316
317 ctl = mdp5_ctlm_request(ctlm, intf_num);
318 if (!ctl) {
319 ret = -EINVAL;
320 break;
321 }
322
301 for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { 323 for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
302 mode = (i == MSM_DSI_CMD_ENCODER_ID) ? 324 mode = (i == MSM_DSI_CMD_ENCODER_ID) ?
303 MDP5_INTF_DSI_MODE_COMMAND : 325 MDP5_INTF_DSI_MODE_COMMAND :
304 MDP5_INTF_DSI_MODE_VIDEO; 326 MDP5_INTF_DSI_MODE_VIDEO;
305 dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI, 327 dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI,
306 intf_num, mode); 328 intf_num, mode, ctl);
307 if (IS_ERR(dsi_encs)) { 329 if (IS_ERR(dsi_encs[i])) {
308 ret = PTR_ERR(dsi_encs); 330 ret = PTR_ERR(dsi_encs[i]);
309 break; 331 break;
310 } 332 }
311 } 333 }
@@ -327,9 +349,12 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
327 static const enum mdp5_pipe crtcs[] = { 349 static const enum mdp5_pipe crtcs[] = {
328 SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, 350 SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
329 }; 351 };
330 static const enum mdp5_pipe pub_planes[] = { 352 static const enum mdp5_pipe vig_planes[] = {
331 SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3, 353 SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
332 }; 354 };
355 static const enum mdp5_pipe dma_planes[] = {
356 SSPP_DMA0, SSPP_DMA1,
357 };
333 struct drm_device *dev = mdp5_kms->dev; 358 struct drm_device *dev = mdp5_kms->dev;
334 struct msm_drm_private *priv = dev->dev_private; 359 struct msm_drm_private *priv = dev->dev_private;
335 const struct mdp5_cfg_hw *hw_cfg; 360 const struct mdp5_cfg_hw *hw_cfg;
@@ -350,7 +375,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
350 struct drm_crtc *crtc; 375 struct drm_crtc *crtc;
351 376
352 plane = mdp5_plane_init(dev, crtcs[i], true, 377 plane = mdp5_plane_init(dev, crtcs[i], true,
353 hw_cfg->pipe_rgb.base[i]); 378 hw_cfg->pipe_rgb.base[i], hw_cfg->pipe_rgb.caps);
354 if (IS_ERR(plane)) { 379 if (IS_ERR(plane)) {
355 ret = PTR_ERR(plane); 380 ret = PTR_ERR(plane);
356 dev_err(dev->dev, "failed to construct plane for %s (%d)\n", 381 dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
@@ -368,16 +393,30 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
368 priv->crtcs[priv->num_crtcs++] = crtc; 393 priv->crtcs[priv->num_crtcs++] = crtc;
369 } 394 }
370 395
371 /* Construct public planes: */ 396 /* Construct video planes: */
372 for (i = 0; i < hw_cfg->pipe_vig.count; i++) { 397 for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
373 struct drm_plane *plane; 398 struct drm_plane *plane;
374 399
375 plane = mdp5_plane_init(dev, pub_planes[i], false, 400 plane = mdp5_plane_init(dev, vig_planes[i], false,
376 hw_cfg->pipe_vig.base[i]); 401 hw_cfg->pipe_vig.base[i], hw_cfg->pipe_vig.caps);
402 if (IS_ERR(plane)) {
403 ret = PTR_ERR(plane);
404 dev_err(dev->dev, "failed to construct %s plane: %d\n",
405 pipe2name(vig_planes[i]), ret);
406 goto fail;
407 }
408 }
409
410 /* DMA planes */
411 for (i = 0; i < hw_cfg->pipe_dma.count; i++) {
412 struct drm_plane *plane;
413
414 plane = mdp5_plane_init(dev, dma_planes[i], false,
415 hw_cfg->pipe_dma.base[i], hw_cfg->pipe_dma.caps);
377 if (IS_ERR(plane)) { 416 if (IS_ERR(plane)) {
378 ret = PTR_ERR(plane); 417 ret = PTR_ERR(plane);
379 dev_err(dev->dev, "failed to construct %s plane: %d\n", 418 dev_err(dev->dev, "failed to construct %s plane: %d\n",
380 pipe2name(pub_planes[i]), ret); 419 pipe2name(dma_planes[i]), ret);
381 goto fail; 420 goto fail;
382 } 421 }
383 } 422 }
@@ -489,7 +528,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
489 goto fail; 528 goto fail;
490 ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk"); 529 ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk");
491 if (ret) 530 if (ret)
492 goto fail; 531 DBG("failed to get (optional) lut_clk clock");
493 ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk"); 532 ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
494 if (ret) 533 if (ret)
495 goto fail; 534 goto fail;
@@ -521,7 +560,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
521 goto fail; 560 goto fail;
522 } 561 }
523 562
524 mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw); 563 mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
525 if (IS_ERR(mdp5_kms->ctlm)) { 564 if (IS_ERR(mdp5_kms->ctlm)) {
526 ret = PTR_ERR(mdp5_kms->ctlm); 565 ret = PTR_ERR(mdp5_kms->ctlm);
527 mdp5_kms->ctlm = NULL; 566 mdp5_kms->ctlm = NULL;
@@ -577,6 +616,11 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
577 goto fail; 616 goto fail;
578 } 617 }
579 618
619 dev->mode_config.min_width = 0;
620 dev->mode_config.min_height = 0;
621 dev->mode_config.max_width = config->hw->lm.max_width;
622 dev->mode_config.max_height = config->hw->lm.max_height;
623
580 return kms; 624 return kms;
581 625
582fail: 626fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index e79ac09b7216..0bb62423586e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -70,18 +70,12 @@ struct mdp5_kms {
70struct mdp5_plane_state { 70struct mdp5_plane_state {
71 struct drm_plane_state base; 71 struct drm_plane_state base;
72 72
73 /* "virtual" zpos.. we calculate actual mixer-stage at runtime 73 /* aligned with property */
74 * by sorting the attached planes by zpos and then assigning 74 uint8_t premultiplied;
75 * mixer stage lowest to highest. Private planes get default 75 uint8_t zpos;
76 * zpos of zero, and public planes a unique value that is 76 uint8_t alpha;
77 * greater than zero. This way, things work out if a naive
78 * userspace assigns planes to a crtc without setting zpos.
79 */
80 int zpos;
81 77
82 /* the actual mixer stage, calculated in crtc->atomic_check() 78 /* assigned by crtc blender */
83 * NOTE: this should move to mdp5_crtc_state, when that exists
84 */
85 enum mdp_mixer_stage_id stage; 79 enum mdp_mixer_stage_id stage;
86 80
87 /* some additional transactional status to help us know in the 81 /* some additional transactional status to help us know in the
@@ -192,7 +186,8 @@ static inline uint32_t lm2ppdone(int lm)
192int mdp5_disable(struct mdp5_kms *mdp5_kms); 186int mdp5_disable(struct mdp5_kms *mdp5_kms);
193int mdp5_enable(struct mdp5_kms *mdp5_kms); 187int mdp5_enable(struct mdp5_kms *mdp5_kms);
194 188
195void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask); 189void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
190 uint32_t old_irqmask);
196void mdp5_irq_preinstall(struct msm_kms *kms); 191void mdp5_irq_preinstall(struct msm_kms *kms);
197int mdp5_irq_postinstall(struct msm_kms *kms); 192int mdp5_irq_postinstall(struct msm_kms *kms);
198void mdp5_irq_uninstall(struct msm_kms *kms); 193void mdp5_irq_uninstall(struct msm_kms *kms);
@@ -202,60 +197,38 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
202int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); 197int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
203void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); 198void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
204 199
205static inline bool pipe_supports_yuv(enum mdp5_pipe pipe)
206{
207 switch (pipe) {
208 case SSPP_VIG0:
209 case SSPP_VIG1:
210 case SSPP_VIG2:
211 case SSPP_VIG3:
212 return true;
213 default:
214 return false;
215 }
216}
217
218static inline
219uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
220 uint32_t max_formats)
221{
222 return mdp_get_formats(pixel_formats, max_formats,
223 !pipe_supports_yuv(pipe));
224}
225
226void mdp5_plane_install_properties(struct drm_plane *plane,
227 struct drm_mode_object *obj);
228uint32_t mdp5_plane_get_flush(struct drm_plane *plane); 200uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
229void mdp5_plane_complete_flip(struct drm_plane *plane); 201void mdp5_plane_complete_flip(struct drm_plane *plane);
230void mdp5_plane_complete_commit(struct drm_plane *plane, 202void mdp5_plane_complete_commit(struct drm_plane *plane,
231 struct drm_plane_state *state); 203 struct drm_plane_state *state);
232enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 204enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
233struct drm_plane *mdp5_plane_init(struct drm_device *dev, 205struct drm_plane *mdp5_plane_init(struct drm_device *dev,
234 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset); 206 enum mdp5_pipe pipe, bool private_plane,
207 uint32_t reg_offset, uint32_t caps);
235 208
236uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); 209uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
237 210
238int mdp5_crtc_get_lm(struct drm_crtc *crtc); 211int mdp5_crtc_get_lm(struct drm_crtc *crtc);
239struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
240void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); 212void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
241void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf); 213void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
214 struct mdp5_interface *intf, struct mdp5_ctl *ctl);
242void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc); 215void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
243struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, 216struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
244 struct drm_plane *plane, int id); 217 struct drm_plane *plane, int id);
245 218
246struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, 219struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
247 struct mdp5_interface *intf); 220 struct mdp5_interface *intf, struct mdp5_ctl *ctl);
248int mdp5_encoder_set_split_display(struct drm_encoder *encoder, 221int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
249 struct drm_encoder *slave_encoder); 222 struct drm_encoder *slave_encoder);
250 223
251#ifdef CONFIG_DRM_MSM_DSI 224#ifdef CONFIG_DRM_MSM_DSI
252struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, 225struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
253 struct mdp5_interface *intf); 226 struct mdp5_interface *intf, struct mdp5_ctl *ctl);
254int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, 227int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
255 struct drm_encoder *slave_encoder); 228 struct drm_encoder *slave_encoder);
256#else 229#else
257static inline struct drm_encoder *mdp5_cmd_encoder_init( 230static inline struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
258 struct drm_device *dev, struct mdp5_interface *intf) 231 struct mdp5_interface *intf, struct mdp5_ctl *ctl)
259{ 232{
260 return ERR_PTR(-EINVAL); 233 return ERR_PTR(-EINVAL);
261} 234}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 22275568ab8b..07fb62fea6dc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved. 2 * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
3 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
5 * 5 *
@@ -26,6 +26,7 @@ struct mdp5_plane {
26 26
27 spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */ 27 spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
28 uint32_t reg_offset; 28 uint32_t reg_offset;
29 uint32_t caps;
29 30
30 uint32_t flush_mask; /* used to commit pipe registers */ 31 uint32_t flush_mask; /* used to commit pipe registers */
31 32
@@ -40,6 +41,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
40 unsigned int crtc_w, unsigned int crtc_h, 41 unsigned int crtc_w, unsigned int crtc_h,
41 uint32_t src_x, uint32_t src_y, 42 uint32_t src_x, uint32_t src_y,
42 uint32_t src_w, uint32_t src_h); 43 uint32_t src_w, uint32_t src_h);
44
43static void set_scanout_locked(struct drm_plane *plane, 45static void set_scanout_locked(struct drm_plane *plane,
44 struct drm_framebuffer *fb); 46 struct drm_framebuffer *fb);
45 47
@@ -64,18 +66,122 @@ static void mdp5_plane_destroy(struct drm_plane *plane)
64 kfree(mdp5_plane); 66 kfree(mdp5_plane);
65} 67}
66 68
69static void mdp5_plane_install_rotation_property(struct drm_device *dev,
70 struct drm_plane *plane)
71{
72 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
73
74 if (!(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP) &&
75 !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP))
76 return;
77
78 if (!dev->mode_config.rotation_property)
79 dev->mode_config.rotation_property =
80 drm_mode_create_rotation_property(dev,
81 BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y));
82
83 if (dev->mode_config.rotation_property)
84 drm_object_attach_property(&plane->base,
85 dev->mode_config.rotation_property,
86 0);
87}
88
67/* helper to install properties which are common to planes and crtcs */ 89/* helper to install properties which are common to planes and crtcs */
68void mdp5_plane_install_properties(struct drm_plane *plane, 90static void mdp5_plane_install_properties(struct drm_plane *plane,
69 struct drm_mode_object *obj) 91 struct drm_mode_object *obj)
70{ 92{
71 // XXX 93 struct drm_device *dev = plane->dev;
94 struct msm_drm_private *dev_priv = dev->dev_private;
95 struct drm_property *prop;
96
97#define INSTALL_PROPERTY(name, NAME, init_val, fnc, ...) do { \
98 prop = dev_priv->plane_property[PLANE_PROP_##NAME]; \
99 if (!prop) { \
100 prop = drm_property_##fnc(dev, 0, #name, \
101 ##__VA_ARGS__); \
102 if (!prop) { \
103 dev_warn(dev->dev, \
104 "Create property %s failed\n", \
105 #name); \
106 return; \
107 } \
108 dev_priv->plane_property[PLANE_PROP_##NAME] = prop; \
109 } \
110 drm_object_attach_property(&plane->base, prop, init_val); \
111 } while (0)
112
113#define INSTALL_RANGE_PROPERTY(name, NAME, min, max, init_val) \
114 INSTALL_PROPERTY(name, NAME, init_val, \
115 create_range, min, max)
116
117#define INSTALL_ENUM_PROPERTY(name, NAME, init_val) \
118 INSTALL_PROPERTY(name, NAME, init_val, \
119 create_enum, name##_prop_enum_list, \
120 ARRAY_SIZE(name##_prop_enum_list))
121
122 INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1);
123
124 mdp5_plane_install_rotation_property(dev, plane);
125
126#undef INSTALL_RANGE_PROPERTY
127#undef INSTALL_ENUM_PROPERTY
128#undef INSTALL_PROPERTY
129}
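/* For reference, INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1) above expands
 * roughly to the following (error handling omitted), caching the property in
 * dev_priv so it is created once and then shared by every plane:
 *
 *	prop = dev_priv->plane_property[PLANE_PROP_ZPOS];
 *	if (!prop) {
 *		prop = drm_property_create_range(dev, 0, "zpos", 1, 255);
 *		dev_priv->plane_property[PLANE_PROP_ZPOS] = prop;
 *	}
 *	drm_object_attach_property(&plane->base, prop, 1);
 */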
130
131static int mdp5_plane_atomic_set_property(struct drm_plane *plane,
132 struct drm_plane_state *state, struct drm_property *property,
133 uint64_t val)
134{
135 struct drm_device *dev = plane->dev;
136 struct mdp5_plane_state *pstate;
137 struct msm_drm_private *dev_priv = dev->dev_private;
138 int ret = 0;
139
140 pstate = to_mdp5_plane_state(state);
141
142#define SET_PROPERTY(name, NAME, type) do { \
143 if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
144 pstate->name = (type)val; \
145 DBG("Set property %s %d", #name, (type)val); \
146 goto done; \
147 } \
148 } while (0)
149
150 SET_PROPERTY(zpos, ZPOS, uint8_t);
151
152 dev_err(dev->dev, "Invalid property\n");
153 ret = -EINVAL;
154done:
155 return ret;
156#undef SET_PROPERTY
72} 157}
73 158
74int mdp5_plane_set_property(struct drm_plane *plane, 159static int mdp5_plane_atomic_get_property(struct drm_plane *plane,
75 struct drm_property *property, uint64_t val) 160 const struct drm_plane_state *state,
161 struct drm_property *property, uint64_t *val)
76{ 162{
77 // XXX 163 struct drm_device *dev = plane->dev;
78 return -EINVAL; 164 struct mdp5_plane_state *pstate;
165 struct msm_drm_private *dev_priv = dev->dev_private;
166 int ret = 0;
167
168 pstate = to_mdp5_plane_state(state);
169
170#define GET_PROPERTY(name, NAME, type) do { \
171 if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
172 *val = pstate->name; \
173 DBG("Get property %s %lld", #name, *val); \
174 goto done; \
175 } \
176 } while (0)
177
178 GET_PROPERTY(zpos, ZPOS, uint8_t);
179
180 dev_err(dev->dev, "Invalid property\n");
181 ret = -EINVAL;
182done:
183 return ret;
184#undef GET_PROPERTY
79} 185}
80 186
81static void mdp5_plane_reset(struct drm_plane *plane) 187static void mdp5_plane_reset(struct drm_plane *plane)
@@ -88,11 +194,15 @@ static void mdp5_plane_reset(struct drm_plane *plane)
88 kfree(to_mdp5_plane_state(plane->state)); 194 kfree(to_mdp5_plane_state(plane->state));
89 mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL); 195 mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
90 196
91 if (plane->type == DRM_PLANE_TYPE_PRIMARY) { 197 /* assign default blend parameters */
92 mdp5_state->zpos = 0; 198 mdp5_state->alpha = 255;
93 } else { 199 mdp5_state->premultiplied = 0;
94 mdp5_state->zpos = 1 + drm_plane_index(plane); 200
95 } 201 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
202 mdp5_state->zpos = STAGE_BASE;
203 else
204 mdp5_state->zpos = STAGE0 + drm_plane_index(plane);
205
96 mdp5_state->base.plane = plane; 206 mdp5_state->base.plane = plane;
97 207
98 plane->state = &mdp5_state->base; 208 plane->state = &mdp5_state->base;
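/* With the reset defaults above, e.g. one primary plane plus two overlay
 * planes (drm_plane_index() 1 and 2) come up at zpos STAGE_BASE, STAGE0 + 1
 * and STAGE0 + 2 respectively; userspace can then reorder the overlays via
 * the "zpos" range property installed earlier.
 */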
@@ -131,7 +241,9 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
131 .update_plane = drm_atomic_helper_update_plane, 241 .update_plane = drm_atomic_helper_update_plane,
132 .disable_plane = drm_atomic_helper_disable_plane, 242 .disable_plane = drm_atomic_helper_disable_plane,
133 .destroy = mdp5_plane_destroy, 243 .destroy = mdp5_plane_destroy,
134 .set_property = mdp5_plane_set_property, 244 .set_property = drm_atomic_helper_plane_set_property,
245 .atomic_set_property = mdp5_plane_atomic_set_property,
246 .atomic_get_property = mdp5_plane_atomic_get_property,
135 .reset = mdp5_plane_reset, 247 .reset = mdp5_plane_reset,
136 .atomic_duplicate_state = mdp5_plane_duplicate_state, 248 .atomic_duplicate_state = mdp5_plane_duplicate_state,
137 .atomic_destroy_state = mdp5_plane_destroy_state, 249 .atomic_destroy_state = mdp5_plane_destroy_state,
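/* With this wiring, a legacy drmModeObjectSetProperty() on the plane is
 * handled by drm_atomic_helper_plane_set_property(), which builds a transient
 * atomic state and invokes the .atomic_set_property hook, so legacy and
 * atomic userspace both land in mdp5_plane_atomic_set_property().
 */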
@@ -164,10 +276,44 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
164{ 276{
165 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 277 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
166 struct drm_plane_state *old_state = plane->state; 278 struct drm_plane_state *old_state = plane->state;
279 const struct mdp_format *format;
280 bool vflip, hflip;
167 281
168 DBG("%s: check (%d -> %d)", mdp5_plane->name, 282 DBG("%s: check (%d -> %d)", mdp5_plane->name,
169 plane_enabled(old_state), plane_enabled(state)); 283 plane_enabled(old_state), plane_enabled(state));
170 284
285 if (plane_enabled(state)) {
286 format = to_mdp_format(msm_framebuffer_format(state->fb));
287 if (MDP_FORMAT_IS_YUV(format) &&
288 !pipe_supports_yuv(mdp5_plane->caps)) {
289 dev_err(plane->dev->dev,
290 "Pipe doesn't support YUV\n");
291
292 return -EINVAL;
293 }
294
295 if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) &&
296 (((state->src_w >> 16) != state->crtc_w) ||
297 ((state->src_h >> 16) != state->crtc_h))) {
298 dev_err(plane->dev->dev,
299 "Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
300 state->src_w >> 16, state->src_h >> 16,
301 state->crtc_w, state->crtc_h);
302
303 return -EINVAL;
304 }
305
306 hflip = !!(state->rotation & BIT(DRM_REFLECT_X));
307 vflip = !!(state->rotation & BIT(DRM_REFLECT_Y));
308 if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
309 (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
310 dev_err(plane->dev->dev,
311 "Pipe doesn't support flip\n");
312
313 return -EINVAL;
314 }
315 }
316
171 if (plane_enabled(state) && plane_enabled(old_state)) { 317 if (plane_enabled(state) && plane_enabled(old_state)) {
172 /* we cannot change SMP block configuration during scanout: */ 318 /* we cannot change SMP block configuration during scanout: */
173 bool full_modeset = false; 319 bool full_modeset = false;
@@ -346,16 +492,21 @@ static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase)
346 return 0; 492 return 0;
347} 493}
348 494
349static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest, 495static int calc_scalex_steps(struct drm_plane *plane,
496 uint32_t pixel_format, uint32_t src, uint32_t dest,
350 uint32_t phasex_steps[2]) 497 uint32_t phasex_steps[2])
351{ 498{
499 struct mdp5_kms *mdp5_kms = get_kms(plane);
500 struct device *dev = mdp5_kms->dev->dev;
352 uint32_t phasex_step; 501 uint32_t phasex_step;
353 unsigned int hsub; 502 unsigned int hsub;
354 int ret; 503 int ret;
355 504
356 ret = calc_phase_step(src, dest, &phasex_step); 505 ret = calc_phase_step(src, dest, &phasex_step);
357 if (ret) 506 if (ret) {
507 dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
358 return ret; 508 return ret;
509 }
359 510
360 hsub = drm_format_horz_chroma_subsampling(pixel_format); 511 hsub = drm_format_horz_chroma_subsampling(pixel_format);
361 512
@@ -365,16 +516,21 @@ static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
365 return 0; 516 return 0;
366} 517}
367 518
368static int calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest, 519static int calc_scaley_steps(struct drm_plane *plane,
520 uint32_t pixel_format, uint32_t src, uint32_t dest,
369 uint32_t phasey_steps[2]) 521 uint32_t phasey_steps[2])
370{ 522{
523 struct mdp5_kms *mdp5_kms = get_kms(plane);
524 struct device *dev = mdp5_kms->dev->dev;
371 uint32_t phasey_step; 525 uint32_t phasey_step;
372 unsigned int vsub; 526 unsigned int vsub;
373 int ret; 527 int ret;
374 528
375 ret = calc_phase_step(src, dest, &phasey_step); 529 ret = calc_phase_step(src, dest, &phasey_step);
376 if (ret) 530 if (ret) {
531 dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
377 return ret; 532 return ret;
533 }
378 534
379 vsub = drm_format_vert_chroma_subsampling(pixel_format); 535 vsub = drm_format_vert_chroma_subsampling(pixel_format);
380 536
@@ -384,28 +540,38 @@ static int calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
384 return 0; 540 return 0;
385} 541}
386 542
387static uint32_t get_scalex_config(uint32_t src, uint32_t dest) 543static uint32_t get_scale_config(enum mdp_chroma_samp_type chroma_sample,
388{ 544 uint32_t src, uint32_t dest, bool hor)
389 uint32_t filter;
390
391 filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
392
393 return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
394 MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(filter) |
395 MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(filter) |
396 MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(filter);
397}
398
399static uint32_t get_scaley_config(uint32_t src, uint32_t dest)
400{ 545{
401 uint32_t filter; 546 uint32_t y_filter = (src <= dest) ? SCALE_FILTER_CA : SCALE_FILTER_PCMN;
402 547 uint32_t y_a_filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
403 filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN; 548 uint32_t uv_filter = ((src / 2) <= dest) ? /* 2x upsample */
549 SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
550 uint32_t value = 0;
551
552 if (chroma_sample == CHROMA_420 || chroma_sample == CHROMA_H2V1) {
553 if (hor)
554 value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
555 MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_filter) |
556 MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter) |
557 MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter);
558 else
559 value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
560 MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_filter) |
561 MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter) |
562 MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter);
563 } else if (src != dest) {
564 if (hor)
565 value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
566 MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_a_filter) |
567 MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter);
568 else
569 value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
570 MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_a_filter) |
571 MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter);
572 }
404 573
405 return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN | 574 return value;
406 MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(filter) |
407 MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(filter) |
408 MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(filter);
409} 575}
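/* Example of the selection above: scaling NV12 (CHROMA_420) horizontally
 * picks SCALE_FILTER_CA for luma on upscale (PCMN on downscale), the
 * bilinear filter for alpha, and treats a 2x chroma upsample as a
 * non-downscale for the U/V filter, since the chroma plane is stored at
 * half width:
 *
 *	config |= get_scale_config(CHROMA_420, src_w, crtc_w, true);
 *
 * whereas an RGB format (CHROMA_FULL) with src == dest returns 0 and the
 * scaler stays disabled.
 */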
410 576
411static int mdp5_plane_mode_set(struct drm_plane *plane, 577static int mdp5_plane_mode_set(struct drm_plane *plane,
@@ -416,8 +582,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
416 uint32_t src_w, uint32_t src_h) 582 uint32_t src_w, uint32_t src_h)
417{ 583{
418 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 584 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
585 struct drm_plane_state *pstate = plane->state;
419 struct mdp5_kms *mdp5_kms = get_kms(plane); 586 struct mdp5_kms *mdp5_kms = get_kms(plane);
420 struct device *dev = mdp5_kms->dev->dev;
421 enum mdp5_pipe pipe = mdp5_plane->pipe; 587 enum mdp5_pipe pipe = mdp5_plane->pipe;
422 const struct mdp_format *format; 588 const struct mdp_format *format;
423 uint32_t nplanes, config = 0; 589 uint32_t nplanes, config = 0;
@@ -425,6 +591,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
425 uint32_t phasex_step[2] = {0,}, phasey_step[2] = {0,}; 591 uint32_t phasex_step[2] = {0,}, phasey_step[2] = {0,};
426 uint32_t hdecm = 0, vdecm = 0; 592 uint32_t hdecm = 0, vdecm = 0;
427 uint32_t pix_format; 593 uint32_t pix_format;
594 bool vflip, hflip;
428 unsigned long flags; 595 unsigned long flags;
429 int ret; 596 int ret;
430 597
@@ -449,7 +616,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
449 616
450 /* Request some memory from the SMP: */ 617 /* Request some memory from the SMP: */
451 ret = mdp5_smp_request(mdp5_kms->smp, 618 ret = mdp5_smp_request(mdp5_kms->smp,
452 mdp5_plane->pipe, fb->pixel_format, src_w); 619 mdp5_plane->pipe, format, src_w, false);
453 if (ret) 620 if (ret)
454 return ret; 621 return ret;
455 622
@@ -461,29 +628,23 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
461 */ 628 */
462 mdp5_smp_configure(mdp5_kms->smp, pipe); 629 mdp5_smp_configure(mdp5_kms->smp, pipe);
463 630
464 /* SCALE is used to both scale and up-sample chroma components */ 631 ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step);
632 if (ret)
633 return ret;
465 634
466 if ((src_w != crtc_w) || MDP_FORMAT_IS_YUV(format)) { 635 ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, phasey_step);
467 /* TODO calc hdecm */ 636 if (ret)
468 ret = calc_scalex_steps(pix_format, src_w, crtc_w, phasex_step); 637 return ret;
469 if (ret) {
470 dev_err(dev, "X scaling (%d -> %d) failed: %d\n",
471 src_w, crtc_w, ret);
472 return ret;
473 }
474 config |= get_scalex_config(src_w, crtc_w);
475 }
476 638
477 if ((src_h != crtc_h) || MDP_FORMAT_IS_YUV(format)) { 639 /* TODO calc hdecm, vdecm */
478 /* TODO calc vdecm */ 640
479 ret = calc_scaley_steps(pix_format, src_h, crtc_h, phasey_step); 641 /* SCALE is used to both scale and up-sample chroma components */
480 if (ret) { 642 config |= get_scale_config(format->chroma_sample, src_w, crtc_w, true);
481 dev_err(dev, "Y scaling (%d -> %d) failed: %d\n", 643 config |= get_scale_config(format->chroma_sample, src_h, crtc_h, false);
482 src_h, crtc_h, ret); 644 DBG("scale config = %x", config);
483 return ret; 645
484 } 646 hflip = !!(pstate->rotation & BIT(DRM_REFLECT_X));
485 config |= get_scaley_config(src_h, crtc_h); 647 vflip = !!(pstate->rotation & BIT(DRM_REFLECT_Y));
486 }
487 648
488 spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); 649 spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
489 650
@@ -516,7 +677,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
516 MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | 677 MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
517 MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | 678 MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
518 COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) | 679 COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
519 MDP5_PIPE_SRC_FORMAT_NUM_PLANES(format->fetch_type) | 680 MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
520 MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample)); 681 MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
521 682
522 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe), 683 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
@@ -526,29 +687,35 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
526 MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); 687 MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
527 688
528 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe), 689 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
690 (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
691 (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
529 MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS)); 692 MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
530 693
531 /* not using secure mode: */ 694 /* not using secure mode: */
532 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0); 695 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
533 696
534 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), 697 if (mdp5_plane->caps & MDP_PIPE_CAP_SCALE) {
535 phasex_step[0]); 698 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
536 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), 699 phasex_step[0]);
537 phasey_step[0]); 700 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
538 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe), 701 phasey_step[0]);
539 phasex_step[1]); 702 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
540 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe), 703 phasex_step[1]);
541 phasey_step[1]); 704 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
542 mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe), 705 phasey_step[1]);
543 MDP5_PIPE_DECIMATION_VERT(vdecm) | 706 mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
544 MDP5_PIPE_DECIMATION_HORZ(hdecm)); 707 MDP5_PIPE_DECIMATION_VERT(vdecm) |
545 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config); 708 MDP5_PIPE_DECIMATION_HORZ(hdecm));
546 709 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
547 if (MDP_FORMAT_IS_YUV(format)) 710 }
548 csc_enable(mdp5_kms, pipe, 711
549 mdp_get_default_csc_cfg(CSC_YUV2RGB)); 712 if (mdp5_plane->caps & MDP_PIPE_CAP_CSC) {
550 else 713 if (MDP_FORMAT_IS_YUV(format))
551 csc_disable(mdp5_kms, pipe); 714 csc_enable(mdp5_kms, pipe,
715 mdp_get_default_csc_cfg(CSC_YUV2RGB));
716 else
717 csc_disable(mdp5_kms, pipe);
718 }
552 719
553 set_scanout_locked(plane, fb); 720 set_scanout_locked(plane, fb);
554 721
@@ -599,7 +766,8 @@ void mdp5_plane_complete_commit(struct drm_plane *plane,
599 766
600/* initialize plane */ 767/* initialize plane */
601struct drm_plane *mdp5_plane_init(struct drm_device *dev, 768struct drm_plane *mdp5_plane_init(struct drm_device *dev,
602 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset) 769 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset,
770 uint32_t caps)
603{ 771{
604 struct drm_plane *plane = NULL; 772 struct drm_plane *plane = NULL;
605 struct mdp5_plane *mdp5_plane; 773 struct mdp5_plane *mdp5_plane;
@@ -616,9 +784,11 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
616 784
617 mdp5_plane->pipe = pipe; 785 mdp5_plane->pipe = pipe;
618 mdp5_plane->name = pipe2name(pipe); 786 mdp5_plane->name = pipe2name(pipe);
787 mdp5_plane->caps = caps;
619 788
620 mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats, 789 mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
621 ARRAY_SIZE(mdp5_plane->formats)); 790 ARRAY_SIZE(mdp5_plane->formats),
791 !pipe_supports_yuv(mdp5_plane->caps));
622 792
623 mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe); 793 mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
624 mdp5_plane->reg_offset = reg_offset; 794 mdp5_plane->reg_offset = reg_offset;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 64a27d86f2f5..563cca972dcb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -90,6 +90,8 @@
90struct mdp5_smp { 90struct mdp5_smp {
91 struct drm_device *dev; 91 struct drm_device *dev;
92 92
93 const struct mdp5_smp_block *cfg;
94
93 int blk_cnt; 95 int blk_cnt;
94 int blk_size; 96 int blk_size;
95 97
@@ -137,14 +139,12 @@ static int smp_request_block(struct mdp5_smp *smp,
137 u32 cid, int nblks) 139 u32 cid, int nblks)
138{ 140{
139 struct mdp5_kms *mdp5_kms = get_kms(smp); 141 struct mdp5_kms *mdp5_kms = get_kms(smp);
140 const struct mdp5_cfg_hw *hw_cfg;
141 struct mdp5_client_smp_state *ps = &smp->client_state[cid]; 142 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
142 int i, ret, avail, cur_nblks, cnt = smp->blk_cnt; 143 int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
143 int reserved; 144 int reserved;
144 unsigned long flags; 145 unsigned long flags;
145 146
146 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 147 reserved = smp->cfg->reserved[cid];
147 reserved = hw_cfg->smp.reserved[cid];
148 148
149 spin_lock_irqsave(&smp->state_lock, flags); 149 spin_lock_irqsave(&smp->state_lock, flags);
150 150
@@ -209,12 +209,14 @@ static void set_fifo_thresholds(struct mdp5_smp *smp,
209 * decimated width. Ie. SMP buffering sits downstream of decimation (which 209 * decimated width. Ie. SMP buffering sits downstream of decimation (which
210 * presumably happens during the dma from scanout buffer). 210 * presumably happens during the dma from scanout buffer).
211 */ 211 */
212int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width) 212int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
213 const struct mdp_format *format, u32 width, bool hdecim)
213{ 214{
214 struct mdp5_kms *mdp5_kms = get_kms(smp); 215 struct mdp5_kms *mdp5_kms = get_kms(smp);
215 struct drm_device *dev = mdp5_kms->dev; 216 struct drm_device *dev = mdp5_kms->dev;
216 int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg); 217 int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
217 int i, hsub, nplanes, nlines, nblks, ret; 218 int i, hsub, nplanes, nlines, nblks, ret;
219 u32 fmt = format->base.pixel_format;
218 220
219 nplanes = drm_format_num_planes(fmt); 221 nplanes = drm_format_num_planes(fmt);
220 hsub = drm_format_horz_chroma_subsampling(fmt); 222 hsub = drm_format_horz_chroma_subsampling(fmt);
@@ -222,6 +224,21 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
222 /* different if BWC (compressed framebuffer?) enabled: */ 224 /* different if BWC (compressed framebuffer?) enabled: */
223 nlines = 2; 225 nlines = 2;
224 226
227 /* Newer MDPs have split/packing logic, which fetches sub-sampled
228 * U and V components (splits them from Y if necessary) and packs
229 * them together, and writes to SMP using a single client.
230 */
231 if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
232 fmt = DRM_FORMAT_NV24;
233 nplanes = 2;
234
235 /* if decimation is enabled, HW decimates less on the
236 * sub-sampled chroma components
237 */
238 if (hdecim && (hsub > 1))
239 hsub = 1;
240 }
241
225 for (i = 0, nblks = 0; i < nplanes; i++) { 242 for (i = 0, nblks = 0; i < nplanes; i++) {
226 int n, fetch_stride, cpp; 243 int n, fetch_stride, cpp;
227 244
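/* Worked example for the packing path above, assuming rev > 0 hardware: an
 * NV12 request (CHROMA_420 > CHROMA_FULL, hsub = 2) is recomputed as NV24
 * with nplanes = 2, so the packed U/V data goes through one SMP client
 * instead of two; and when horizontal decimation is in use, hsub is forced
 * to 1 because the HW decimates the sub-sampled chroma less than luma.
 */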
@@ -388,6 +405,7 @@ struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_blo
388 } 405 }
389 406
390 smp->dev = dev; 407 smp->dev = dev;
408 smp->cfg = cfg;
391 smp->blk_cnt = cfg->mmb_count; 409 smp->blk_cnt = cfg->mmb_count;
392 smp->blk_size = cfg->mmb_size; 410 smp->blk_size = cfg->mmb_size;
393 411
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index 5b6c2363f592..20b87e800ea3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -39,7 +39,8 @@ struct mdp5_smp;
39struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg); 39struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
40void mdp5_smp_destroy(struct mdp5_smp *smp); 40void mdp5_smp_destroy(struct mdp5_smp *smp);
41 41
42int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width); 42int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
43 const struct mdp_format *format, u32 width, bool hdecim);
43void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe); 44void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
44void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe); 45void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
45void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe); 46void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 641d036c5bcb..4f792c4e40f4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23) 21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
22 22
23Copyright (C) 2013-2015 by the following authors: 23Copyright (C) 2013-2015 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 24- Rob Clark <robdclark@gmail.com> (robclark)
@@ -46,7 +46,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
46 46
47 47
48enum mdp_chroma_samp_type { 48enum mdp_chroma_samp_type {
49 CHROMA_RGB = 0, 49 CHROMA_FULL = 0,
50 CHROMA_H2V1 = 1, 50 CHROMA_H2V1 = 1,
51 CHROMA_H1V2 = 2, 51 CHROMA_H1V2 = 2,
52 CHROMA_420 = 3, 52 CHROMA_420 = 3,
@@ -65,6 +65,10 @@ enum mdp_mixer_stage_id {
65 STAGE1 = 3, 65 STAGE1 = 3,
66 STAGE2 = 4, 66 STAGE2 = 4,
67 STAGE3 = 5, 67 STAGE3 = 5,
68 STAGE4 = 6,
69 STAGE5 = 7,
70 STAGE6 = 8,
71 STAGE_MAX = 8,
68}; 72};
69 73
70enum mdp_alpha_type { 74enum mdp_alpha_type {
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
index 7b0524dc1872..1c2caffc97e4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_format.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -71,7 +71,7 @@ static struct csc_cfg csc_convert[CSC_MAX] = {
71 }, 71 },
72}; 72};
73 73
74#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs) { \ 74#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \
75 .base = { .pixel_format = DRM_FORMAT_ ## name }, \ 75 .base = { .pixel_format = DRM_FORMAT_ ## name }, \
76 .bpc_a = BPC ## a ## A, \ 76 .bpc_a = BPC ## a ## A, \
77 .bpc_r = BPC ## r, \ 77 .bpc_r = BPC ## r, \
@@ -83,7 +83,8 @@ static struct csc_cfg csc_convert[CSC_MAX] = {
83 .cpp = c, \ 83 .cpp = c, \
84 .unpack_count = cnt, \ 84 .unpack_count = cnt, \
85 .fetch_type = fp, \ 85 .fetch_type = fp, \
86 .chroma_sample = cs \ 86 .chroma_sample = cs, \
87 .is_yuv = yuv, \
87} 88}
88 89
89#define BPC0A 0 90#define BPC0A 0
@@ -95,30 +96,49 @@ static struct csc_cfg csc_convert[CSC_MAX] = {
95static const struct mdp_format formats[] = { 96static const struct mdp_format formats[] = {
96 /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... */ 97 /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... */
97 FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4, 98 FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4,
98 MDP_PLANE_INTERLEAVED, CHROMA_RGB), 99 MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
99 FMT(ABGR8888, 8, 8, 8, 8, 2, 0, 1, 3, true, true, 4, 4, 100 FMT(ABGR8888, 8, 8, 8, 8, 2, 0, 1, 3, true, true, 4, 4,
100 MDP_PLANE_INTERLEAVED, CHROMA_RGB), 101 MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
101 FMT(RGBA8888, 8, 8, 8, 8, 3, 1, 0, 2, true, true, 4, 4, 102 FMT(RGBA8888, 8, 8, 8, 8, 3, 1, 0, 2, true, true, 4, 4,
102 MDP_PLANE_INTERLEAVED, CHROMA_RGB), 103 MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
103 FMT(BGRA8888, 8, 8, 8, 8, 3, 2, 0, 1, true, true, 4, 4, 104 FMT(BGRA8888, 8, 8, 8, 8, 3, 2, 0, 1, true, true, 4, 4,
104 MDP_PLANE_INTERLEAVED, CHROMA_RGB), 105 MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
105 FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4, 106 FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4,
106 MDP_PLANE_INTERLEAVED, CHROMA_RGB), 107 MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
107 FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3, 108 FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3,
108 MDP_PLANE_INTERLEAVED, CHROMA_RGB), 109 MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
109 FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3, 110 FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3,
110 MDP_PLANE_INTERLEAVED, CHROMA_RGB), 111 MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
111 FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3, 112 FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3,
112 MDP_PLANE_INTERLEAVED, CHROMA_RGB), 113 MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
113 FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3, 114 FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3,
114 MDP_PLANE_INTERLEAVED, CHROMA_RGB), 115 MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
115 116
116 /* --- RGB formats above / YUV formats below this line --- */ 117 /* --- RGB formats above / YUV formats below this line --- */
117 118
119 /* 2 plane YUV */
118 FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2, 120 FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2,
119 MDP_PLANE_PSEUDO_PLANAR, CHROMA_420), 121 MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
120 FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2, 122 FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2,
121 MDP_PLANE_PSEUDO_PLANAR, CHROMA_420), 123 MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
124 FMT(NV16, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2,
125 MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
126 FMT(NV61, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2,
127 MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
128 /* 1 plane YUV */
129 FMT(VYUY, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 2, 4,
130 MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
131 FMT(UYVY, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 2, 4,
132 MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
133 FMT(YUYV, 0, 8, 8, 8, 0, 1, 0, 2, false, true, 2, 4,
134 MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
135 FMT(YVYU, 0, 8, 8, 8, 0, 2, 0, 1, false, true, 2, 4,
136 MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
137 /* 3 plane YUV */
138 FMT(YUV420, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 1, 1,
139 MDP_PLANE_PLANAR, CHROMA_420, true),
140 FMT(YVU420, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 1, 1,
141 MDP_PLANE_PLANAR, CHROMA_420, true),
122}; 142};
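/* Note the fetch layouts above: the 2-plane formats carry packed U/V in a
 * second plane (cpp = 2 covers the chroma pair), the packed 1-plane formats
 * hold a 2-pixel macropixel in 4 bytes (cpp = 2, unpack_count = 4), and the
 * 3-plane formats fetch each component separately (cpp = 1).
 */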
123 143
124/* 144/*
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
index 1988c243f437..64287304054d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -39,7 +39,8 @@ static void update_irq(struct mdp_kms *mdp_kms)
39 list_for_each_entry(irq, &mdp_kms->irq_list, node) 39 list_for_each_entry(irq, &mdp_kms->irq_list, node)
40 irqmask |= irq->irqmask; 40 irqmask |= irq->irqmask;
41 41
42 mdp_kms->funcs->set_irqmask(mdp_kms, irqmask); 42 mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask);
43 mdp_kms->cur_irq_mask = irqmask;
43} 44}
44 45
45/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder 46/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
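/* Passing the old mask down lets the mdp5 backend compute which bits were
 * just turned on, e.g. (a sketch of the intended use; the mdp5 side is not
 * shown in this hunk):
 *
 *	uint32_t new_bits = irqmask & ~old_irqmask;
 *
 * so stale interrupt status can be cleared before newly-enabled IRQs are
 * unmasked.
 */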
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 2d3428cb74d0..46a94e7d50e2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -30,7 +30,8 @@ struct mdp_kms;
30 30
31struct mdp_kms_funcs { 31struct mdp_kms_funcs {
32 struct msm_kms_funcs base; 32 struct msm_kms_funcs base;
33 void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask); 33 void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask,
34 uint32_t old_irqmask);
34}; 35};
35 36
36struct mdp_kms { 37struct mdp_kms {
@@ -42,6 +43,7 @@ struct mdp_kms {
42 bool in_irq; 43 bool in_irq;
43 struct list_head irq_list; /* list of mdp4_irq */ 44 struct list_head irq_list; /* list of mdp4_irq */
44 uint32_t vblank_mask; /* irq bits set for userspace vblank */ 45 uint32_t vblank_mask; /* irq bits set for userspace vblank */
46 uint32_t cur_irq_mask; /* current irq mask */
45}; 47};
46#define to_mdp_kms(x) container_of(x, struct mdp_kms, base) 48#define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
47 49
@@ -90,13 +92,27 @@ struct mdp_format {
90 uint8_t cpp, unpack_count; 92 uint8_t cpp, unpack_count;
91 enum mdp_fetch_type fetch_type; 93 enum mdp_fetch_type fetch_type;
92 enum mdp_chroma_samp_type chroma_sample; 94 enum mdp_chroma_samp_type chroma_sample;
95 bool is_yuv;
93}; 96};
94#define to_mdp_format(x) container_of(x, struct mdp_format, base) 97#define to_mdp_format(x) container_of(x, struct mdp_format, base)
95#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->chroma_sample > CHROMA_RGB) 98#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
96 99
97uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); 100uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
98const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); 101const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
99 102
103/* MDP pipe capabilities */
104#define MDP_PIPE_CAP_HFLIP BIT(0)
105#define MDP_PIPE_CAP_VFLIP BIT(1)
106#define MDP_PIPE_CAP_SCALE BIT(2)
107#define MDP_PIPE_CAP_CSC BIT(3)
108#define MDP_PIPE_CAP_DECIMATION BIT(4)
109
110static inline bool pipe_supports_yuv(uint32_t pipe_caps)
111{
112 return (pipe_caps & MDP_PIPE_CAP_SCALE) &&
113 (pipe_caps & MDP_PIPE_CAP_CSC);
114}
115
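/* Usage sketch (caps combinations here are illustrative, set per SoC in the
 * hw config): a VIG pipe with MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
 * MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP passes pipe_supports_yuv() and is
 * offered the YUV formats, while an RGB or DMA pipe lacking SCALE+CSC gets
 * only the rgb_only list from mdp_get_formats().
 */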
100enum csc_type { 116enum csc_type {
101 CSC_RGB2RGB = 0, 117 CSC_RGB2RGB = 0,
102 CSC_YUV2RGB, 118 CSC_YUV2RGB,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d3467b115e04..0339c5d82d37 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -116,6 +116,65 @@ u32 msm_readl(const void __iomem *addr)
116 return val; 116 return val;
117} 117}
118 118
119struct vblank_event {
120 struct list_head node;
121 int crtc_id;
122 bool enable;
123};
124
125static void vblank_ctrl_worker(struct work_struct *work)
126{
127 struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
128 struct msm_vblank_ctrl, work);
129 struct msm_drm_private *priv = container_of(vbl_ctrl,
130 struct msm_drm_private, vblank_ctrl);
131 struct msm_kms *kms = priv->kms;
132 struct vblank_event *vbl_ev, *tmp;
133 unsigned long flags;
134
135 spin_lock_irqsave(&vbl_ctrl->lock, flags);
136 list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
137 list_del(&vbl_ev->node);
138 spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
139
140 if (vbl_ev->enable)
141 kms->funcs->enable_vblank(kms,
142 priv->crtcs[vbl_ev->crtc_id]);
143 else
144 kms->funcs->disable_vblank(kms,
145 priv->crtcs[vbl_ev->crtc_id]);
146
147 kfree(vbl_ev);
148
149 spin_lock_irqsave(&vbl_ctrl->lock, flags);
150 }
151
152 spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
153}
154
155static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
156 int crtc_id, bool enable)
157{
158 struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
159 struct vblank_event *vbl_ev;
160 unsigned long flags;
161
162 vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
163 if (!vbl_ev)
164 return -ENOMEM;
165
166 vbl_ev->crtc_id = crtc_id;
167 vbl_ev->enable = enable;
168
169 spin_lock_irqsave(&vbl_ctrl->lock, flags);
170 list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
171 spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
172
173 queue_work(priv->wq, &vbl_ctrl->work);
174
175 return 0;
176}
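/* The enable/disable callbacks below now only queue work, e.g.
 *
 *	vblank_ctrl_queue_work(priv, crtc_id, true);
 *
 * so kms->funcs->enable_vblank() (which may need to sleep, e.g. to touch
 * clocks) runs from priv->wq instead of the atomic drm_vblank_get() path;
 * for the same reason the event is allocated with GFP_ATOMIC above, since
 * the caller can hold spinlocks.
 */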
177
119/* 178/*
120 * DRM operations: 179 * DRM operations:
121 */ 180 */
@@ -125,6 +184,18 @@ static int msm_unload(struct drm_device *dev)
125 struct msm_drm_private *priv = dev->dev_private; 184 struct msm_drm_private *priv = dev->dev_private;
126 struct msm_kms *kms = priv->kms; 185 struct msm_kms *kms = priv->kms;
127 struct msm_gpu *gpu = priv->gpu; 186 struct msm_gpu *gpu = priv->gpu;
187 struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
188 struct vblank_event *vbl_ev, *tmp;
189
190 /* We must cancel and clean up any pending vblank enable/disable
191 * work before drm_irq_uninstall() to avoid work re-enabling an
192 * irq after uninstall has disabled it.
193 */
194 cancel_work_sync(&vbl_ctrl->work);
195 list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
196 list_del(&vbl_ev->node);
197 kfree(vbl_ev);
198 }
128 199
129 drm_kms_helper_poll_fini(dev); 200 drm_kms_helper_poll_fini(dev);
130 drm_mode_config_cleanup(dev); 201 drm_mode_config_cleanup(dev);
@@ -282,6 +353,9 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
282 353
283 INIT_LIST_HEAD(&priv->inactive_list); 354 INIT_LIST_HEAD(&priv->inactive_list);
284 INIT_LIST_HEAD(&priv->fence_cbs); 355 INIT_LIST_HEAD(&priv->fence_cbs);
356 INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
357 INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
358 spin_lock_init(&priv->vblank_ctrl.lock);
285 359
286 drm_mode_config_init(dev); 360 drm_mode_config_init(dev);
287 361
@@ -331,10 +405,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
331 } 405 }
332 } 406 }
333 407
334 dev->mode_config.min_width = 0;
335 dev->mode_config.min_height = 0;
336 dev->mode_config.max_width = 2048;
337 dev->mode_config.max_height = 2048;
338 dev->mode_config.funcs = &mode_config_funcs; 408 dev->mode_config.funcs = &mode_config_funcs;
339 409
340 ret = drm_vblank_init(dev, priv->num_crtcs); 410 ret = drm_vblank_init(dev, priv->num_crtcs);
@@ -468,7 +538,7 @@ static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
468 if (!kms) 538 if (!kms)
469 return -ENXIO; 539 return -ENXIO;
470 DBG("dev=%p, crtc=%d", dev, crtc_id); 540 DBG("dev=%p, crtc=%d", dev, crtc_id);
471 return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]); 541 return vblank_ctrl_queue_work(priv, crtc_id, true);
472} 542}
473 543
474static void msm_disable_vblank(struct drm_device *dev, int crtc_id) 544static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
@@ -478,7 +548,7 @@ static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
478 if (!kms) 548 if (!kms)
479 return; 549 return;
480 DBG("dev=%p, crtc=%d", dev, crtc_id); 550 DBG("dev=%p, crtc=%d", dev, crtc_id);
481 kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]); 551 vblank_ctrl_queue_work(priv, crtc_id, false);
482} 552}
483 553
484/* 554/*
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 4ff0ec9c994b..3be7a56b14f1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -30,6 +30,7 @@
30#include <linux/list.h> 30#include <linux/list.h>
31#include <linux/iommu.h> 31#include <linux/iommu.h>
32#include <linux/types.h> 32#include <linux/types.h>
33#include <linux/of_graph.h>
33#include <asm/sizes.h> 34#include <asm/sizes.h>
34 35
35#ifndef CONFIG_OF 36#ifndef CONFIG_OF
@@ -64,6 +65,19 @@ struct msm_file_private {
64 int dummy; 65 int dummy;
65}; 66};
66 67
68enum msm_mdp_plane_property {
69 PLANE_PROP_ZPOS,
70 PLANE_PROP_ALPHA,
71 PLANE_PROP_PREMULTIPLIED,
72 PLANE_PROP_MAX_NUM
73};
74
75struct msm_vblank_ctrl {
76 struct work_struct work;
77 struct list_head event_list;
78 spinlock_t lock;
79};
80
67struct msm_drm_private { 81struct msm_drm_private {
68 82
69 struct msm_kms *kms; 83 struct msm_kms *kms;
@@ -128,6 +142,9 @@ struct msm_drm_private {
128 unsigned int num_connectors; 142 unsigned int num_connectors;
129 struct drm_connector *connectors[8]; 143 struct drm_connector *connectors[8];
130 144
145 /* Properties */
146 struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
147
131 /* VRAM carveout, used when no IOMMU: */ 148 /* VRAM carveout, used when no IOMMU: */
132 struct { 149 struct {
133 unsigned long size; 150 unsigned long size;
@@ -137,6 +154,8 @@ struct msm_drm_private {
137 */ 154 */
138 struct drm_mm mm; 155 struct drm_mm mm;
139 } vram; 156 } vram;
157
158 struct msm_vblank_ctrl vblank_ctrl;
140}; 159};
141 160
142struct msm_format { 161struct msm_format {
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 95f6532df02d..f97a1964ef39 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -43,11 +43,11 @@ static struct fb_ops msm_fb_ops = {
43 /* Note: to properly handle manual update displays, we wrap the 43 /* Note: to properly handle manual update displays, we wrap the
44 * basic fbdev ops which write to the framebuffer 44 * basic fbdev ops which write to the framebuffer
45 */ 45 */
46 .fb_read = fb_sys_read, 46 .fb_read = drm_fb_helper_sys_read,
47 .fb_write = fb_sys_write, 47 .fb_write = drm_fb_helper_sys_write,
48 .fb_fillrect = sys_fillrect, 48 .fb_fillrect = drm_fb_helper_sys_fillrect,
49 .fb_copyarea = sys_copyarea, 49 .fb_copyarea = drm_fb_helper_sys_copyarea,
50 .fb_imageblit = sys_imageblit, 50 .fb_imageblit = drm_fb_helper_sys_imageblit,
51 .fb_mmap = msm_fbdev_mmap, 51 .fb_mmap = msm_fbdev_mmap,
52 52
53 .fb_check_var = drm_fb_helper_check_var, 53 .fb_check_var = drm_fb_helper_check_var,
@@ -144,10 +144,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
144 goto fail_unlock; 144 goto fail_unlock;
145 } 145 }
146 146
147 fbi = framebuffer_alloc(0, dev->dev); 147 fbi = drm_fb_helper_alloc_fbi(helper);
148 if (!fbi) { 148 if (IS_ERR(fbi)) {
149 dev_err(dev->dev, "failed to allocate fb info\n"); 149 dev_err(dev->dev, "failed to allocate fb info\n");
150 ret = -ENOMEM; 150 ret = PTR_ERR(fbi);
151 goto fail_unlock; 151 goto fail_unlock;
152 } 152 }
153 153
@@ -155,7 +155,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
155 155
156 fbdev->fb = fb; 156 fbdev->fb = fb;
157 helper->fb = fb; 157 helper->fb = fb;
158 helper->fbdev = fbi;
159 158
160 fbi->par = helper; 159 fbi->par = helper;
161 fbi->flags = FBINFO_DEFAULT; 160 fbi->flags = FBINFO_DEFAULT;
@@ -163,12 +162,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
163 162
164 strcpy(fbi->fix.id, "msm"); 163 strcpy(fbi->fix.id, "msm");
165 164
166 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
167 if (ret) {
168 ret = -ENOMEM;
169 goto fail_unlock;
170 }
171
172 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); 165 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
173 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); 166 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
174 167
@@ -191,7 +184,6 @@ fail_unlock:
191fail: 184fail:
192 185
193 if (ret) { 186 if (ret) {
194 framebuffer_release(fbi);
195 if (fb) { 187 if (fb) {
196 drm_framebuffer_unregister_private(fb); 188 drm_framebuffer_unregister_private(fb);
197 drm_framebuffer_remove(fb); 189 drm_framebuffer_remove(fb);
@@ -266,17 +258,11 @@ void msm_fbdev_free(struct drm_device *dev)
266 struct msm_drm_private *priv = dev->dev_private; 258 struct msm_drm_private *priv = dev->dev_private;
267 struct drm_fb_helper *helper = priv->fbdev; 259 struct drm_fb_helper *helper = priv->fbdev;
268 struct msm_fbdev *fbdev; 260 struct msm_fbdev *fbdev;
269 struct fb_info *fbi;
270 261
271 DBG(); 262 DBG();
272 263
273 fbi = helper->fbdev; 264 drm_fb_helper_unregister_fbi(helper);
274 265 drm_fb_helper_release_fbi(helper);
275 /* only cleanup framebuffer if it is present */
276 if (fbi) {
277 unregister_framebuffer(fbi);
278 framebuffer_release(fbi);
279 }
280 266
281 drm_fb_helper_fini(helper); 267 drm_fb_helper_fini(helper);
282 268
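drm_fb_helper_alloc_fbi() is what makes the removed framebuffer_alloc()/fb_alloc_cmap() calls and the helper->fbdev assignment unnecessary: it allocates the fb_info, allocates its cmap, and attaches it to the helper in one step. A rough sketch of the paired lifecycle after this conversion (error handling elided; the function names are the real drm_fb_helper API, the comments summarize rather than quote the implementation):

fbi = drm_fb_helper_alloc_fbi(helper);	/* fb_info + cmap, sets helper->fbdev */
...
drm_fb_helper_unregister_fbi(helper);	/* wraps unregister_framebuffer() */
drm_fb_helper_release_fbi(helper);	/* frees cmap + fb_info, clears helper->fbdev */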
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 9f2498571d09..5f6ea1873f51 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -261,7 +261,7 @@ nv10_overlay_init(struct drm_device *device)
261{ 261{
262 struct nouveau_drm *drm = nouveau_drm(device); 262 struct nouveau_drm *drm = nouveau_drm(device);
263 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); 263 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
264 int num_formats = ARRAY_SIZE(formats); 264 unsigned int num_formats = ARRAY_SIZE(formats);
265 int ret; 265 int ret;
266 266
267 if (!plane) 267 if (!plane)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 6751553abe4a..2791701685dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -84,7 +84,7 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
84 84
85 if (ret != -ENODEV) 85 if (ret != -ENODEV)
86 nouveau_fbcon_gpu_lockup(info); 86 nouveau_fbcon_gpu_lockup(info);
87 cfb_fillrect(info, rect); 87 drm_fb_helper_cfb_fillrect(info, rect);
88} 88}
89 89
90static void 90static void
@@ -116,7 +116,7 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
116 116
117 if (ret != -ENODEV) 117 if (ret != -ENODEV)
118 nouveau_fbcon_gpu_lockup(info); 118 nouveau_fbcon_gpu_lockup(info);
119 cfb_copyarea(info, image); 119 drm_fb_helper_cfb_copyarea(info, image);
120} 120}
121 121
122static void 122static void
@@ -148,7 +148,7 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
148 148
149 if (ret != -ENODEV) 149 if (ret != -ENODEV)
150 nouveau_fbcon_gpu_lockup(info); 150 nouveau_fbcon_gpu_lockup(info);
151 cfb_imageblit(info, image); 151 drm_fb_helper_cfb_imageblit(info, image);
152} 152}
153 153
154static int 154static int
@@ -197,9 +197,9 @@ static struct fb_ops nouveau_fbcon_sw_ops = {
197 .owner = THIS_MODULE, 197 .owner = THIS_MODULE,
198 .fb_check_var = drm_fb_helper_check_var, 198 .fb_check_var = drm_fb_helper_check_var,
199 .fb_set_par = drm_fb_helper_set_par, 199 .fb_set_par = drm_fb_helper_set_par,
200 .fb_fillrect = cfb_fillrect, 200 .fb_fillrect = drm_fb_helper_cfb_fillrect,
201 .fb_copyarea = cfb_copyarea, 201 .fb_copyarea = drm_fb_helper_cfb_copyarea,
202 .fb_imageblit = cfb_imageblit, 202 .fb_imageblit = drm_fb_helper_cfb_imageblit,
203 .fb_pan_display = drm_fb_helper_pan_display, 203 .fb_pan_display = drm_fb_helper_pan_display,
204 .fb_blank = drm_fb_helper_blank, 204 .fb_blank = drm_fb_helper_blank,
205 .fb_setcmap = drm_fb_helper_setcmap, 205 .fb_setcmap = drm_fb_helper_setcmap,
@@ -319,7 +319,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
319 struct nouveau_channel *chan; 319 struct nouveau_channel *chan;
320 struct nouveau_bo *nvbo; 320 struct nouveau_bo *nvbo;
321 struct drm_mode_fb_cmd2 mode_cmd; 321 struct drm_mode_fb_cmd2 mode_cmd;
322 struct pci_dev *pdev = dev->pdev;
323 int size, ret; 322 int size, ret;
324 323
325 mode_cmd.width = sizes->surface_width; 324 mode_cmd.width = sizes->surface_width;
@@ -365,20 +364,13 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
365 364
366 mutex_lock(&dev->struct_mutex); 365 mutex_lock(&dev->struct_mutex);
367 366
368 info = framebuffer_alloc(0, &pdev->dev); 367 info = drm_fb_helper_alloc_fbi(helper);
369 if (!info) { 368 if (IS_ERR(info)) {
370 ret = -ENOMEM; 369 ret = PTR_ERR(info);
371 goto out_unlock; 370 goto out_unlock;
372 } 371 }
373 info->skip_vt_switch = 1; 372 info->skip_vt_switch = 1;
374 373
375 ret = fb_alloc_cmap(&info->cmap, 256, 0);
376 if (ret) {
377 ret = -ENOMEM;
378 framebuffer_release(info);
379 goto out_unlock;
380 }
381
382 info->par = fbcon; 374 info->par = fbcon;
383 375
384 nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo); 376 nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
@@ -388,7 +380,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
388 380
389 /* setup helper */ 381 /* setup helper */
390 fbcon->helper.fb = fb; 382 fbcon->helper.fb = fb;
391 fbcon->helper.fbdev = info;
392 383
393 strcpy(info->fix.id, "nouveaufb"); 384 strcpy(info->fix.id, "nouveaufb");
394 if (!chan) 385 if (!chan)
@@ -450,15 +441,9 @@ static int
450nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon) 441nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
451{ 442{
452 struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb; 443 struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
453 struct fb_info *info;
454 444
455 if (fbcon->helper.fbdev) { 445 drm_fb_helper_unregister_fbi(&fbcon->helper);
456 info = fbcon->helper.fbdev; 446 drm_fb_helper_release_fbi(&fbcon->helper);
457 unregister_framebuffer(info);
458 if (info->cmap.len)
459 fb_dealloc_cmap(&info->cmap);
460 framebuffer_release(info);
461 }
462 447
463 if (nouveau_fb->nvbo) { 448 if (nouveau_fb->nvbo) {
464 nouveau_bo_unmap(nouveau_fb->nvbo); 449 nouveau_bo_unmap(nouveau_fb->nvbo);
@@ -496,7 +481,7 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
496 console_lock(); 481 console_lock();
497 if (state == FBINFO_STATE_RUNNING) 482 if (state == FBINFO_STATE_RUNNING)
498 nouveau_fbcon_accel_restore(dev); 483 nouveau_fbcon_accel_restore(dev);
499 fb_set_suspend(drm->fbcon->helper.fbdev, state); 484 drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
500 if (state != FBINFO_STATE_RUNNING) 485 if (state != FBINFO_STATE_RUNNING)
501 nouveau_fbcon_accel_save_disable(dev); 486 nouveau_fbcon_accel_save_disable(dev);
502 console_unlock(); 487 console_unlock();
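drm_fb_helper_set_suspend() also folds in the NULL checks that open-coded fb_set_suspend() callers had to remember; roughly (a 4.3-era sketch, not necessarily verbatim):

void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state)
{
	if (fb_helper && fb_helper->fbdev)
		fb_set_suspend(fb_helper->fbdev, state);
}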
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 7464aef34674..737e8f976a98 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -433,10 +433,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
433void 433void
434nouveau_ttm_fini(struct nouveau_drm *drm) 434nouveau_ttm_fini(struct nouveau_drm *drm)
435{ 435{
436 mutex_lock(&drm->dev->struct_mutex);
437 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM); 436 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
438 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT); 437 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
439 mutex_unlock(&drm->dev->struct_mutex);
440 438
441 ttm_bo_device_release(&drm->ttm.bdev); 439 ttm_bo_device_release(&drm->ttm.bdev);
442 440
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 52c22b026005..e10f9644140f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -166,30 +166,14 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
166} 166}
167 167
168static int 168static int
169gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
170{
171 struct nvkm_object *obj = (void *)chan;
172 struct gk104_fifo_priv *priv = (void *)obj->engine;
173
174 nv_wr32(priv, 0x002634, chan->base.chid);
175 if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
176 nv_error(priv, "channel %d [%s] kick timeout\n",
177 chan->base.chid, nvkm_client_name(chan));
178 return -EBUSY;
179 }
180
181 return 0;
182}
183
184static int
185gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend, 169gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
186 struct nvkm_object *object) 170 struct nvkm_object *object)
187{ 171{
188 struct nvkm_bar *bar = nvkm_bar(parent); 172 struct nvkm_bar *bar = nvkm_bar(parent);
173 struct gk104_fifo_priv *priv = (void *)parent->engine;
189 struct gk104_fifo_base *base = (void *)parent->parent; 174 struct gk104_fifo_base *base = (void *)parent->parent;
190 struct gk104_fifo_chan *chan = (void *)parent; 175 struct gk104_fifo_chan *chan = (void *)parent;
191 u32 addr; 176 u32 addr;
192 int ret;
193 177
194 switch (nv_engidx(object->engine)) { 178 switch (nv_engidx(object->engine)) {
195 case NVDEV_ENGINE_SW : return 0; 179 case NVDEV_ENGINE_SW : return 0;
@@ -204,9 +188,13 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
204 return -EINVAL; 188 return -EINVAL;
205 } 189 }
206 190
207 ret = gk104_fifo_chan_kick(chan); 191 nv_wr32(priv, 0x002634, chan->base.chid);
208 if (ret && suspend) 192 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
209 return ret; 193 nv_error(priv, "channel %d [%s] kick timeout\n",
194 chan->base.chid, nvkm_client_name(chan));
195 if (suspend)
196 return -EBUSY;
197 }
210 198
211 if (addr) { 199 if (addr) {
212 nv_wo32(base, addr + 0x00, 0x00000000); 200 nv_wo32(base, addr + 0x00, 0x00000000);
@@ -331,7 +319,6 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
331 gk104_fifo_runlist_update(priv, chan->engine); 319 gk104_fifo_runlist_update(priv, chan->engine);
332 } 320 }
333 321
334 gk104_fifo_chan_kick(chan);
335 nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000); 322 nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
336 return nvkm_fifo_channel_fini(&chan->base, suspend); 323 return nvkm_fifo_channel_fini(&chan->base, suspend);
337} 324}
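Inlining the kick changes its semantics as well: the dropped helper waited for the 0x100000 busy bit in register 0x002634 to clear and always failed with -EBUSY on timeout, while the detach path above waits for the register to read back the full channel id and treats a timeout as fatal only when suspending. A sketch of the new sequence, pulled into a hypothetical helper for readability (kick_channel is not a real function in this file; nv_wait(priv, reg, mask, data) polls until (read & mask) == data and returns false on timeout):

static int kick_channel(struct gk104_fifo_priv *priv, u32 chid, bool suspend)
{
	nv_wr32(priv, 0x002634, chid);			/* request preemption */
	if (!nv_wait(priv, 0x002634, 0xffffffff, chid))	/* wait for readback */
		return suspend ? -EBUSY : 0;		/* fatal only on suspend */
	return 0;
}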
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 23d9c928cdc9..9a4ba4f03567 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -388,11 +388,13 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
388 copy_timings_drm_to_omap(&omap_crtc->timings, mode); 388 copy_timings_drm_to_omap(&omap_crtc->timings, mode);
389} 389}
390 390
391static void omap_crtc_atomic_begin(struct drm_crtc *crtc) 391static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
392 struct drm_crtc_state *old_crtc_state)
392{ 393{
393} 394}
394 395
395static void omap_crtc_atomic_flush(struct drm_crtc *crtc) 396static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
397 struct drm_crtc_state *old_crtc_state)
396{ 398{
397 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 399 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
398 400
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 720d16bce7e8..b8e4cdec28c3 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -86,11 +86,11 @@ static struct fb_ops omap_fb_ops = {
86 /* Note: to properly handle manual update displays, we wrap the 86 /* Note: to properly handle manual update displays, we wrap the
87 * basic fbdev ops which write to the framebuffer 87 * basic fbdev ops which write to the framebuffer
88 */ 88 */
89 .fb_read = fb_sys_read, 89 .fb_read = drm_fb_helper_sys_read,
90 .fb_write = fb_sys_write, 90 .fb_write = drm_fb_helper_sys_write,
91 .fb_fillrect = sys_fillrect, 91 .fb_fillrect = drm_fb_helper_sys_fillrect,
92 .fb_copyarea = sys_copyarea, 92 .fb_copyarea = drm_fb_helper_sys_copyarea,
93 .fb_imageblit = sys_imageblit, 93 .fb_imageblit = drm_fb_helper_sys_imageblit,
94 94
95 .fb_check_var = drm_fb_helper_check_var, 95 .fb_check_var = drm_fb_helper_check_var,
96 .fb_set_par = drm_fb_helper_set_par, 96 .fb_set_par = drm_fb_helper_set_par,
@@ -179,10 +179,10 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
179 179
180 mutex_lock(&dev->struct_mutex); 180 mutex_lock(&dev->struct_mutex);
181 181
182 fbi = framebuffer_alloc(0, dev->dev); 182 fbi = drm_fb_helper_alloc_fbi(helper);
183 if (!fbi) { 183 if (IS_ERR(fbi)) {
184 dev_err(dev->dev, "failed to allocate fb info\n"); 184 dev_err(dev->dev, "failed to allocate fb info\n");
185 ret = -ENOMEM; 185 ret = PTR_ERR(fbi);
186 goto fail_unlock; 186 goto fail_unlock;
187 } 187 }
188 188
@@ -190,7 +190,6 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
190 190
191 fbdev->fb = fb; 191 fbdev->fb = fb;
192 helper->fb = fb; 192 helper->fb = fb;
193 helper->fbdev = fbi;
194 193
195 fbi->par = helper; 194 fbi->par = helper;
196 fbi->flags = FBINFO_DEFAULT; 195 fbi->flags = FBINFO_DEFAULT;
@@ -198,12 +197,6 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
198 197
199 strcpy(fbi->fix.id, MODULE_NAME); 198 strcpy(fbi->fix.id, MODULE_NAME);
200 199
201 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
202 if (ret) {
203 ret = -ENOMEM;
204 goto fail_unlock;
205 }
206
207 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); 200 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
208 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); 201 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
209 202
@@ -236,8 +229,9 @@ fail_unlock:
236fail: 229fail:
237 230
238 if (ret) { 231 if (ret) {
239 if (fbi) 232
240 framebuffer_release(fbi); 233 drm_fb_helper_release_fbi(helper);
234
241 if (fb) { 235 if (fb) {
242 drm_framebuffer_unregister_private(fb); 236 drm_framebuffer_unregister_private(fb);
243 drm_framebuffer_remove(fb); 237 drm_framebuffer_remove(fb);
@@ -312,17 +306,11 @@ void omap_fbdev_free(struct drm_device *dev)
312 struct omap_drm_private *priv = dev->dev_private; 306 struct omap_drm_private *priv = dev->dev_private;
313 struct drm_fb_helper *helper = priv->fbdev; 307 struct drm_fb_helper *helper = priv->fbdev;
314 struct omap_fbdev *fbdev; 308 struct omap_fbdev *fbdev;
315 struct fb_info *fbi;
316 309
317 DBG(); 310 DBG();
318 311
319 fbi = helper->fbdev; 312 drm_fb_helper_unregister_fbi(helper);
320 313 drm_fb_helper_release_fbi(helper);
321 /* only cleanup framebuffer if it is present */
322 if (fbi) {
323 unregister_framebuffer(fbi);
324 framebuffer_release(fbi);
325 }
326 314
327 drm_fb_helper_fini(helper); 315 drm_fb_helper_fini(helper);
328 316
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 6d64c7bb908b..7d4704b1292b 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -18,13 +18,21 @@ config DRM_PANEL_SIMPLE
18 that it can be automatically turned off when the panel goes into a 18 that it can be automatically turned off when the panel goes into a
19 low power state. 19 low power state.
20 20
21config DRM_PANEL_LD9040 21config DRM_PANEL_SAMSUNG_LD9040
22 tristate "LD9040 RGB/SPI panel" 22 tristate "Samsung LD9040 RGB/SPI panel"
23 depends on OF && SPI 23 depends on OF && SPI
24 select VIDEOMODE_HELPERS 24 select VIDEOMODE_HELPERS
25 25
26config DRM_PANEL_S6E8AA0 26config DRM_PANEL_LG_LG4573
27 tristate "S6E8AA0 DSI video mode panel" 27 tristate "LG4573 RGB/SPI panel"
28 depends on OF && SPI
29 select VIDEOMODE_HELPERS
30 help
31 Say Y here if you want to enable support for LG4573 RGB panel.
32 To compile this driver as a module, choose M here.
33
34config DRM_PANEL_SAMSUNG_S6E8AA0
35 tristate "Samsung S6E8AA0 DSI video mode panel"
28 depends on OF 36 depends on OF
29 select DRM_MIPI_DSI 37 select DRM_MIPI_DSI
30 select VIDEOMODE_HELPERS 38 select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 4b2a0430804b..d0f016dd7ddb 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,4 +1,5 @@
1obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o 1obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
2obj-$(CONFIG_DRM_PANEL_LD9040) += panel-ld9040.o 2obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
3obj-$(CONFIG_DRM_PANEL_S6E8AA0) += panel-s6e8aa0.o 3obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
4obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
4obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o 5obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
new file mode 100644
index 000000000000..a7b4939cee6d
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -0,0 +1,298 @@
1/*
2 * Copyright (C) 2015 Heiko Schocher <hs@denx.de>
3 *
4 * from:
5 * drivers/gpu/drm/panel/panel-ld9040.c
6 * ld9040 AMOLED LCD drm_panel driver.
7 *
8 * Copyright (c) 2014 Samsung Electronics Co., Ltd
9 * Derived from drivers/video/backlight/ld9040.c
10 *
11 * Andrzej Hajda <a.hajda@samsung.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16*/
17
18#include <drm/drmP.h>
19#include <drm/drm_panel.h>
20
21#include <linux/gpio/consumer.h>
22#include <linux/regulator/consumer.h>
23#include <linux/spi/spi.h>
24
25#include <video/mipi_display.h>
26#include <video/of_videomode.h>
27#include <video/videomode.h>
28
29struct lg4573 {
30 struct drm_panel panel;
31 struct spi_device *spi;
32 struct videomode vm;
33};
34
35static inline struct lg4573 *panel_to_lg4573(struct drm_panel *panel)
36{
37 return container_of(panel, struct lg4573, panel);
38}
39
40static int lg4573_spi_write_u16(struct lg4573 *ctx, u16 data)
41{
42 struct spi_transfer xfer = {
43 .len = 2,
44 };
45 u16 temp = cpu_to_be16(data);
46 struct spi_message msg;
47
48 dev_dbg(ctx->panel.dev, "writing data: %x\n", data);
49 xfer.tx_buf = &temp;
50 spi_message_init(&msg);
51 spi_message_add_tail(&xfer, &msg);
52
53 return spi_sync(ctx->spi, &msg);
54}
55
56static int lg4573_spi_write_u16_array(struct lg4573 *ctx, const u16 *buffer,
57 unsigned int count)
58{
59 unsigned int i;
60 int ret;
61
62 for (i = 0; i < count; i++) {
63 ret = lg4573_spi_write_u16(ctx, buffer[i]);
64 if (ret)
65 return ret;
66 }
67
68 return 0;
69}
70
71static int lg4573_spi_write_dcs(struct lg4573 *ctx, u8 dcs)
72{
73 return lg4573_spi_write_u16(ctx, (0x70 << 8 | dcs));
74}
75
76static int lg4573_display_on(struct lg4573 *ctx)
77{
78 int ret;
79
80 ret = lg4573_spi_write_dcs(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
81 if (ret)
82 return ret;
83
84 msleep(5);
85
86 return lg4573_spi_write_dcs(ctx, MIPI_DCS_SET_DISPLAY_ON);
87}
88
89static int lg4573_display_off(struct lg4573 *ctx)
90{
91 int ret;
92
93 ret = lg4573_spi_write_dcs(ctx, MIPI_DCS_SET_DISPLAY_OFF);
94 if (ret)
95 return ret;
96
97 msleep(120);
98
99 return lg4573_spi_write_dcs(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
100}
101
102static int lg4573_display_mode_settings(struct lg4573 *ctx)
103{
104 static const u16 display_mode_settings[] = {
105 0x703A, 0x7270, 0x70B1, 0x7208,
106 0x723B, 0x720F, 0x70B2, 0x7200,
107 0x72C8, 0x70B3, 0x7200, 0x70B4,
108 0x7200, 0x70B5, 0x7242, 0x7210,
109 0x7210, 0x7200, 0x7220, 0x70B6,
110 0x720B, 0x720F, 0x723C, 0x7213,
111 0x7213, 0x72E8, 0x70B7, 0x7246,
112 0x7206, 0x720C, 0x7200, 0x7200,
113 };
114
115 dev_dbg(ctx->panel.dev, "transfer display mode settings\n");
116 return lg4573_spi_write_u16_array(ctx, display_mode_settings,
117 ARRAY_SIZE(display_mode_settings));
118}
119
120static int lg4573_power_settings(struct lg4573 *ctx)
121{
122 static const u16 power_settings[] = {
123 0x70C0, 0x7201, 0x7211, 0x70C3,
124 0x7207, 0x7203, 0x7204, 0x7204,
125 0x7204, 0x70C4, 0x7212, 0x7224,
126 0x7218, 0x7218, 0x7202, 0x7249,
127 0x70C5, 0x726F, 0x70C6, 0x7241,
128 0x7263,
129 };
130
131 dev_dbg(ctx->panel.dev, "transfer power settings\n");
132 return lg4573_spi_write_u16_array(ctx, power_settings,
133 ARRAY_SIZE(power_settings));
134}
135
136static int lg4573_gamma_settings(struct lg4573 *ctx)
137{
138 static const u16 gamma_settings[] = {
139 0x70D0, 0x7203, 0x7207, 0x7273,
140 0x7235, 0x7200, 0x7201, 0x7220,
141 0x7200, 0x7203, 0x70D1, 0x7203,
142 0x7207, 0x7273, 0x7235, 0x7200,
143 0x7201, 0x7220, 0x7200, 0x7203,
144 0x70D2, 0x7203, 0x7207, 0x7273,
145 0x7235, 0x7200, 0x7201, 0x7220,
146 0x7200, 0x7203, 0x70D3, 0x7203,
147 0x7207, 0x7273, 0x7235, 0x7200,
148 0x7201, 0x7220, 0x7200, 0x7203,
149 0x70D4, 0x7203, 0x7207, 0x7273,
150 0x7235, 0x7200, 0x7201, 0x7220,
151 0x7200, 0x7203, 0x70D5, 0x7203,
152 0x7207, 0x7273, 0x7235, 0x7200,
153 0x7201, 0x7220, 0x7200, 0x7203,
154 };
155
156 dev_dbg(ctx->panel.dev, "transfer gamma settings\n");
157 return lg4573_spi_write_u16_array(ctx, gamma_settings,
158 ARRAY_SIZE(gamma_settings));
159}
160
161static int lg4573_init(struct lg4573 *ctx)
162{
163 int ret;
164
165 dev_dbg(ctx->panel.dev, "initializing LCD\n");
166
167 ret = lg4573_display_mode_settings(ctx);
168 if (ret)
169 return ret;
170
171 ret = lg4573_power_settings(ctx);
172 if (ret)
173 return ret;
174
175 return lg4573_gamma_settings(ctx);
176}
177
178static int lg4573_power_on(struct lg4573 *ctx)
179{
180 return lg4573_display_on(ctx);
181}
182
183static int lg4573_disable(struct drm_panel *panel)
184{
185 struct lg4573 *ctx = panel_to_lg4573(panel);
186
187 return lg4573_display_off(ctx);
188}
189
190static int lg4573_enable(struct drm_panel *panel)
191{
192 struct lg4573 *ctx = panel_to_lg4573(panel);
193
194 lg4573_init(ctx);
195
196 return lg4573_power_on(ctx);
197}
198
199static const struct drm_display_mode default_mode = {
200 .clock = 27000,
201 .hdisplay = 480,
202 .hsync_start = 480 + 10,
203 .hsync_end = 480 + 10 + 59,
204 .htotal = 480 + 10 + 59 + 10,
205 .vdisplay = 800,
206 .vsync_start = 800 + 15,
207 .vsync_end = 800 + 15 + 15,
208 .vtotal = 800 + 15 + 15 + 15,
209 .vrefresh = 60,
210};
211
212static int lg4573_get_modes(struct drm_panel *panel)
213{
214 struct drm_connector *connector = panel->connector;
215 struct drm_display_mode *mode;
216
217 mode = drm_mode_duplicate(panel->drm, &default_mode);
218 if (!mode) {
219 dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
220 default_mode.hdisplay, default_mode.vdisplay,
221 default_mode.vrefresh);
222 return -ENOMEM;
223 }
224
225 drm_mode_set_name(mode);
226
227 mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
228 drm_mode_probed_add(connector, mode);
229
230 panel->connector->display_info.width_mm = 61;
231 panel->connector->display_info.height_mm = 103;
232
233 return 1;
234}
235
236static const struct drm_panel_funcs lg4573_drm_funcs = {
237 .disable = lg4573_disable,
238 .enable = lg4573_enable,
239 .get_modes = lg4573_get_modes,
240};
241
242static int lg4573_probe(struct spi_device *spi)
243{
244 struct lg4573 *ctx;
245 int ret;
246
247 ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL);
248 if (!ctx)
249 return -ENOMEM;
250
251 ctx->spi = spi;
252
253 spi_set_drvdata(spi, ctx);
254 spi->bits_per_word = 8;
255
256 ret = spi_setup(spi);
257 if (ret < 0) {
258 dev_err(&spi->dev, "SPI setup failed: %d\n", ret);
259 return ret;
260 }
261
262 drm_panel_init(&ctx->panel);
263 ctx->panel.dev = &spi->dev;
264 ctx->panel.funcs = &lg4573_drm_funcs;
265
266 return drm_panel_add(&ctx->panel);
267}
268
269static int lg4573_remove(struct spi_device *spi)
270{
271 struct lg4573 *ctx = spi_get_drvdata(spi);
272
273 lg4573_display_off(ctx);
274 drm_panel_remove(&ctx->panel);
275
276 return 0;
277}
278
279static const struct of_device_id lg4573_of_match[] = {
280 { .compatible = "lg,lg4573" },
281 { }
282};
283MODULE_DEVICE_TABLE(of, lg4573_of_match);
284
285static struct spi_driver lg4573_driver = {
286 .probe = lg4573_probe,
287 .remove = lg4573_remove,
288 .driver = {
289 .name = "lg4573",
290 .owner = THIS_MODULE,
291 .of_match_table = lg4573_of_match,
292 },
293};
294module_spi_driver(lg4573_driver);
295
296MODULE_AUTHOR("Heiko Schocher <hs@denx.de>");
297MODULE_DESCRIPTION("lg4573 LCD Driver");
298MODULE_LICENSE("GPL v2");
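The wire protocol is worth spelling out: every transfer is a 16-bit big-endian word (see the cpu_to_be16() in lg4573_spi_write_u16()) whose high byte selects the word type, 0x70 for a DCS command byte and 0x72 for a parameter byte, with the payload in the low byte. That is why the settings tables above interleave 0x70xx and 0x72xx values. A worked example using names from the driver:

/* lg4573_spi_write_dcs(ctx, MIPI_DCS_EXIT_SLEEP_MODE) sends
 *   0x70 << 8 | 0x11 = 0x7011
 * and the run 0x70B1, 0x7208, 0x723B, 0x720F in display_mode_settings[]
 * is command 0xB1 followed by the parameter bytes 0x08, 0x3B, 0x0F. */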
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 9c27bded4c09..b202377135e7 100644
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -377,7 +377,7 @@ static struct spi_driver ld9040_driver = {
377 .probe = ld9040_probe, 377 .probe = ld9040_probe,
378 .remove = ld9040_remove, 378 .remove = ld9040_remove,
379 .driver = { 379 .driver = {
380 .name = "ld9040", 380 .name = "panel-samsung-ld9040",
381 .owner = THIS_MODULE, 381 .owner = THIS_MODULE,
382 .of_match_table = ld9040_of_match, 382 .of_match_table = ld9040_of_match,
383 }, 383 },
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index 30051108eec4..a188a3959f1a 100644
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -1051,7 +1051,7 @@ static struct mipi_dsi_driver s6e8aa0_driver = {
1051 .probe = s6e8aa0_probe, 1051 .probe = s6e8aa0_probe,
1052 .remove = s6e8aa0_remove, 1052 .remove = s6e8aa0_remove,
1053 .driver = { 1053 .driver = {
1054 .name = "panel_s6e8aa0", 1054 .name = "panel-samsung-s6e8aa0",
1055 .of_match_table = s6e8aa0_of_match, 1055 .of_match_table = s6e8aa0_of_match,
1056 }, 1056 },
1057}; 1057};
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index f94201b6e882..f97b73ec4713 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -713,7 +713,12 @@ static const struct display_timing hannstar_hsd070pww1_timing = {
713 .hactive = { 1280, 1280, 1280 }, 713 .hactive = { 1280, 1280, 1280 },
714 .hfront_porch = { 1, 1, 10 }, 714 .hfront_porch = { 1, 1, 10 },
715 .hback_porch = { 1, 1, 10 }, 715 .hback_porch = { 1, 1, 10 },
716 .hsync_len = { 52, 158, 661 }, 716 /*
717 * According to the data sheet, the minimum horizontal blanking interval
718 * is 54 clocks (1 + 52 + 1), but tests with a Nitrogen6X have shown the
719 * minimum working horizontal blanking interval to be 60 clocks.
720 */
721 .hsync_len = { 58, 158, 661 },
717 .vactive = { 800, 800, 800 }, 722 .vactive = { 800, 800, 800 },
718 .vfront_porch = { 1, 1, 10 }, 723 .vfront_porch = { 1, 1, 10 },
719 .vback_porch = { 1, 1, 10 }, 724 .vback_porch = { 1, 1, 10 },
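The new minimum follows directly from the horizontal porch minimums, which stay at 1 clock each: 1 + 58 + 1 = 60 clocks of blanking, the smallest interval the comment reports as working in practice, versus the datasheet's 1 + 52 + 1 = 54.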
@@ -729,6 +734,7 @@ static const struct panel_desc hannstar_hsd070pww1 = {
729 .width = 151, 734 .width = 151,
730 .height = 94, 735 .height = 94,
731 }, 736 },
737 .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
732}; 738};
733 739
734static const struct display_timing hannstar_hsd100pxn1_timing = { 740static const struct display_timing hannstar_hsd100pxn1_timing = {
@@ -943,6 +949,60 @@ static const struct panel_desc lg_lp129qe = {
943 }, 949 },
944}; 950};
945 951
952static const struct drm_display_mode nec_nl4827hc19_05b_mode = {
953 .clock = 10870,
954 .hdisplay = 480,
955 .hsync_start = 480 + 2,
956 .hsync_end = 480 + 2 + 41,
957 .htotal = 480 + 2 + 41 + 2,
958 .vdisplay = 272,
959 .vsync_start = 272 + 2,
960 .vsync_end = 272 + 2 + 4,
961 .vtotal = 272 + 2 + 4 + 2,
962 .vrefresh = 74,
963};
964
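A quick consistency check of the added mode, using the timing fields above:

	htotal  = 480 + 2 + 41 + 2 = 525
	vtotal  = 272 + 2 +  4 + 2 = 280
	refresh = 10870000 / (525 * 280) ≈ 73.9 Hz

which rounds to the declared .vrefresh of 74.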
965static const struct panel_desc nec_nl4827hc19_05b = {
966 .modes = &nec_nl4827hc19_05b_mode,
967 .num_modes = 1,
968 .bpc = 8,
969 .size = {
970 .width = 95,
971 .height = 54,
972 },
973 .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
974};
975
976static const struct display_timing okaya_rs800480t_7x0gp_timing = {
977 .pixelclock = { 30000000, 30000000, 40000000 },
978 .hactive = { 800, 800, 800 },
979 .hfront_porch = { 40, 40, 40 },
980 .hback_porch = { 40, 40, 40 },
981 .hsync_len = { 1, 48, 48 },
982 .vactive = { 480, 480, 480 },
983 .vfront_porch = { 13, 13, 13 },
984 .vback_porch = { 29, 29, 29 },
985 .vsync_len = { 3, 3, 3 },
986 .flags = DISPLAY_FLAGS_DE_HIGH,
987};
988
989static const struct panel_desc okaya_rs800480t_7x0gp = {
990 .timings = &okaya_rs800480t_7x0gp_timing,
991 .num_timings = 1,
992 .bpc = 6,
993 .size = {
994 .width = 154,
995 .height = 87,
996 },
997 .delay = {
998 .prepare = 41,
999 .enable = 50,
1000 .unprepare = 41,
1001 .disable = 50,
1002 },
1003 .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
1004};
1005
946static const struct drm_display_mode ortustech_com43h4m85ulc_mode = { 1006static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
947 .clock = 25000, 1007 .clock = 25000,
948 .hdisplay = 480, 1008 .hdisplay = 480,
@@ -1113,6 +1173,12 @@ static const struct of_device_id platform_of_match[] = {
1113 .compatible = "lg,lp129qe", 1173 .compatible = "lg,lp129qe",
1114 .data = &lg_lp129qe, 1174 .data = &lg_lp129qe,
1115 }, { 1175 }, {
1176 .compatible = "nec,nl4827hc19-05b",
1177 .data = &nec_nl4827hc19_05b,
1178 }, {
1179 .compatible = "okaya,rs800480t-7x0gp",
1180 .data = &okaya_rs800480t_7x0gp,
1181 }, {
1116 .compatible = "ortustech,com43h4m85ulc", 1182 .compatible = "ortustech,com43h4m85ulc",
1117 .data = &ortustech_com43h4m85ulc, 1183 .data = &ortustech_com43h4m85ulc,
1118 }, { 1184 }, {
@@ -1169,6 +1235,34 @@ struct panel_desc_dsi {
1169 unsigned int lanes; 1235 unsigned int lanes;
1170}; 1236};
1171 1237
1238static const struct drm_display_mode auo_b080uan01_mode = {
1239 .clock = 154500,
1240 .hdisplay = 1200,
1241 .hsync_start = 1200 + 62,
1242 .hsync_end = 1200 + 62 + 4,
1243 .htotal = 1200 + 62 + 4 + 62,
1244 .vdisplay = 1920,
1245 .vsync_start = 1920 + 9,
1246 .vsync_end = 1920 + 9 + 2,
1247 .vtotal = 1920 + 9 + 2 + 8,
1248 .vrefresh = 60,
1249};
1250
1251static const struct panel_desc_dsi auo_b080uan01 = {
1252 .desc = {
1253 .modes = &auo_b080uan01_mode,
1254 .num_modes = 1,
1255 .bpc = 8,
1256 .size = {
1257 .width = 108,
1258 .height = 272,
1259 },
1260 },
1261 .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
1262 .format = MIPI_DSI_FMT_RGB888,
1263 .lanes = 4,
1264};
1265
1172static const struct drm_display_mode lg_ld070wx3_sl01_mode = { 1266static const struct drm_display_mode lg_ld070wx3_sl01_mode = {
1173 .clock = 71000, 1267 .clock = 71000,
1174 .hdisplay = 800, 1268 .hdisplay = 800,
@@ -1256,6 +1350,9 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
1256 1350
1257static const struct of_device_id dsi_of_match[] = { 1351static const struct of_device_id dsi_of_match[] = {
1258 { 1352 {
1353 .compatible = "auo,b080uan01",
1354 .data = &auo_b080uan01
1355 }, {
1259 .compatible = "lg,ld070wx3-sl01", 1356 .compatible = "lg,ld070wx3-sl01",
1260 .data = &lg_ld070wx3_sl01 1357 .data = &lg_ld070wx3_sl01
1261 }, { 1358 }, {
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 6b6e57e8c2d6..41c422fee31a 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -197,7 +197,7 @@ static void qxl_fb_fillrect(struct fb_info *info,
197{ 197{
198 struct qxl_fbdev *qfbdev = info->par; 198 struct qxl_fbdev *qfbdev = info->par;
199 199
200 sys_fillrect(info, rect); 200 drm_fb_helper_sys_fillrect(info, rect);
201 qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width, 201 qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width,
202 rect->height); 202 rect->height);
203} 203}
@@ -207,7 +207,7 @@ static void qxl_fb_copyarea(struct fb_info *info,
207{ 207{
208 struct qxl_fbdev *qfbdev = info->par; 208 struct qxl_fbdev *qfbdev = info->par;
209 209
210 sys_copyarea(info, area); 210 drm_fb_helper_sys_copyarea(info, area);
211 qxl_dirty_update(qfbdev, area->dx, area->dy, area->width, 211 qxl_dirty_update(qfbdev, area->dx, area->dy, area->width,
212 area->height); 212 area->height);
213} 213}
@@ -217,7 +217,7 @@ static void qxl_fb_imageblit(struct fb_info *info,
217{ 217{
218 struct qxl_fbdev *qfbdev = info->par; 218 struct qxl_fbdev *qfbdev = info->par;
219 219
220 sys_imageblit(info, image); 220 drm_fb_helper_sys_imageblit(info, image);
221 qxl_dirty_update(qfbdev, image->dx, image->dy, image->width, 221 qxl_dirty_update(qfbdev, image->dx, image->dy, image->width,
222 image->height); 222 image->height);
223} 223}
@@ -345,7 +345,6 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
345 struct drm_mode_fb_cmd2 mode_cmd; 345 struct drm_mode_fb_cmd2 mode_cmd;
346 struct drm_gem_object *gobj = NULL; 346 struct drm_gem_object *gobj = NULL;
347 struct qxl_bo *qbo = NULL; 347 struct qxl_bo *qbo = NULL;
348 struct device *device = &qdev->pdev->dev;
349 int ret; 348 int ret;
350 int size; 349 int size;
351 int bpp = sizes->surface_bpp; 350 int bpp = sizes->surface_bpp;
@@ -374,9 +373,9 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
374 shadow); 373 shadow);
375 size = mode_cmd.pitches[0] * mode_cmd.height; 374 size = mode_cmd.pitches[0] * mode_cmd.height;
376 375
377 info = framebuffer_alloc(0, device); 376 info = drm_fb_helper_alloc_fbi(&qfbdev->helper);
378 if (info == NULL) { 377 if (IS_ERR(info)) {
379 ret = -ENOMEM; 378 ret = PTR_ERR(info);
380 goto out_unref; 379 goto out_unref;
381 } 380 }
382 381
@@ -388,7 +387,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
388 387
389 /* setup helper with fb data */ 388 /* setup helper with fb data */
390 qfbdev->helper.fb = fb; 389 qfbdev->helper.fb = fb;
391 qfbdev->helper.fbdev = info; 390
392 qfbdev->shadow = shadow; 391 qfbdev->shadow = shadow;
393 strcpy(info->fix.id, "qxldrmfb"); 392 strcpy(info->fix.id, "qxldrmfb");
394 393
@@ -410,11 +409,6 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
410 sizes->fb_height); 409 sizes->fb_height);
411 410
412 /* setup aperture base/size for vesafb takeover */ 411 /* setup aperture base/size for vesafb takeover */
413 info->apertures = alloc_apertures(1);
414 if (!info->apertures) {
415 ret = -ENOMEM;
416 goto out_unref;
417 }
418 info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base; 412 info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
419 info->apertures->ranges[0].size = qdev->vram_size; 413 info->apertures->ranges[0].size = qdev->vram_size;
420 414
@@ -423,13 +417,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
423 417
424 if (info->screen_base == NULL) { 418 if (info->screen_base == NULL) {
425 ret = -ENOSPC; 419 ret = -ENOSPC;
426 goto out_unref; 420 goto out_destroy_fbi;
427 }
428
429 ret = fb_alloc_cmap(&info->cmap, 256, 0);
430 if (ret) {
431 ret = -ENOMEM;
432 goto out_unref;
433 } 421 }
434 422
435 info->fbdefio = &qxl_defio; 423 info->fbdefio = &qxl_defio;
@@ -441,6 +429,8 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
441 DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height); 429 DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
442 return 0; 430 return 0;
443 431
432out_destroy_fbi:
433 drm_fb_helper_release_fbi(&qfbdev->helper);
444out_unref: 434out_unref:
445 if (qbo) { 435 if (qbo) {
446 ret = qxl_bo_reserve(qbo, false); 436 ret = qxl_bo_reserve(qbo, false);
@@ -479,15 +469,11 @@ static int qxl_fb_find_or_create_single(
479 469
480static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev) 470static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
481{ 471{
482 struct fb_info *info;
483 struct qxl_framebuffer *qfb = &qfbdev->qfb; 472 struct qxl_framebuffer *qfb = &qfbdev->qfb;
484 473
485 if (qfbdev->helper.fbdev) { 474 drm_fb_helper_unregister_fbi(&qfbdev->helper);
486 info = qfbdev->helper.fbdev; 475 drm_fb_helper_release_fbi(&qfbdev->helper);
487 476
488 unregister_framebuffer(info);
489 framebuffer_release(info);
490 }
491 if (qfb->obj) { 477 if (qfb->obj) {
492 qxlfb_destroy_pinned_object(qfb->obj); 478 qxlfb_destroy_pinned_object(qfb->obj);
493 qfb->obj = NULL; 479 qfb->obj = NULL;
@@ -557,7 +543,7 @@ void qxl_fbdev_fini(struct qxl_device *qdev)
557 543
558void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state) 544void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
559{ 545{
560 fb_set_suspend(qdev->mode_info.qfbdev->helper.fbdev, state); 546 drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state);
561} 547}
562 548
563bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj) 549bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 6d6f33de48f4..b28370e014c6 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -272,7 +272,6 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
272 return; 272 return;
273 dev_err(qdev->dev, "Userspace still has active objects !\n"); 273 dev_err(qdev->dev, "Userspace still has active objects !\n");
274 list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) { 274 list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
275 mutex_lock(&qdev->ddev->struct_mutex);
276 dev_err(qdev->dev, "%p %p %lu %lu force free\n", 275 dev_err(qdev->dev, "%p %p %lu %lu force free\n",
277 &bo->gem_base, bo, (unsigned long)bo->gem_base.size, 276 &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
278 *((unsigned long *)&bo->gem_base.refcount)); 277 *((unsigned long *)&bo->gem_base.refcount));
@@ -280,8 +279,7 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
280 list_del_init(&bo->list); 279 list_del_init(&bo->list);
281 mutex_unlock(&qdev->gem.mutex); 280 mutex_unlock(&qdev->gem.mutex);
282 /* this should unref the ttm bo */ 281 /* this should unref the ttm bo */
283 drm_gem_object_unreference(&bo->gem_base); 282 drm_gem_object_unreference_unlocked(&bo->gem_base);
284 mutex_unlock(&qdev->ddev->struct_mutex);
285 } 283 }
286} 284}
287 285
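The switch to drm_gem_object_unreference_unlocked() is what allows the struct_mutex lock/unlock pair around it to go: the plain unreference variant requires the caller to hold dev->struct_mutex, while the unlocked variant takes the mutex itself around the final free. A sketch of how such a helper can be implemented (the exact in-tree version has varied across releases):

static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
	if (obj)
		kref_put_mutex(&obj->refcount, drm_gem_object_free,
			       &obj->dev->struct_mutex);
}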
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 44480c1b9738..752072771388 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -76,16 +76,35 @@ static void dce6_afmt_get_connected_pins(struct radeon_device *rdev)
76 76
77struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev) 77struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev)
78{ 78{
79 int i; 79 struct drm_encoder *encoder;
80 struct radeon_encoder *radeon_encoder;
81 struct radeon_encoder_atom_dig *dig;
82 struct r600_audio_pin *pin = NULL;
83 int i, pin_count;
80 84
81 dce6_afmt_get_connected_pins(rdev); 85 dce6_afmt_get_connected_pins(rdev);
82 86
83 for (i = 0; i < rdev->audio.num_pins; i++) { 87 for (i = 0; i < rdev->audio.num_pins; i++) {
84 if (rdev->audio.pin[i].connected) 88 if (rdev->audio.pin[i].connected) {
85 return &rdev->audio.pin[i]; 89 pin = &rdev->audio.pin[i];
90 pin_count = 0;
91
92 list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
93 if (radeon_encoder_is_digital(encoder)) {
94 radeon_encoder = to_radeon_encoder(encoder);
95 dig = radeon_encoder->enc_priv;
96 if (dig->pin == pin)
97 pin_count++;
98 }
99 }
100
101 if (pin_count == 0)
102 return pin;
103 }
86 } 104 }
87 DRM_ERROR("No connected audio pins found!\n"); 105 if (!pin)
88 return NULL; 106 DRM_ERROR("No connected audio pins found!\n");
107 return pin;
89} 108}
90 109
91void dce6_afmt_select_pin(struct drm_encoder *encoder) 110void dce6_afmt_select_pin(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index aeb676708e60..7214858ffcea 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -82,9 +82,9 @@ static struct fb_ops radeonfb_ops = {
82 .owner = THIS_MODULE, 82 .owner = THIS_MODULE,
83 .fb_check_var = drm_fb_helper_check_var, 83 .fb_check_var = drm_fb_helper_check_var,
84 .fb_set_par = radeon_fb_helper_set_par, 84 .fb_set_par = radeon_fb_helper_set_par,
85 .fb_fillrect = cfb_fillrect, 85 .fb_fillrect = drm_fb_helper_cfb_fillrect,
86 .fb_copyarea = cfb_copyarea, 86 .fb_copyarea = drm_fb_helper_cfb_copyarea,
87 .fb_imageblit = cfb_imageblit, 87 .fb_imageblit = drm_fb_helper_cfb_imageblit,
88 .fb_pan_display = drm_fb_helper_pan_display, 88 .fb_pan_display = drm_fb_helper_pan_display,
89 .fb_blank = drm_fb_helper_blank, 89 .fb_blank = drm_fb_helper_blank,
90 .fb_setcmap = drm_fb_helper_setcmap, 90 .fb_setcmap = drm_fb_helper_setcmap,
@@ -227,7 +227,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
227 struct drm_mode_fb_cmd2 mode_cmd; 227 struct drm_mode_fb_cmd2 mode_cmd;
228 struct drm_gem_object *gobj = NULL; 228 struct drm_gem_object *gobj = NULL;
229 struct radeon_bo *rbo = NULL; 229 struct radeon_bo *rbo = NULL;
230 struct device *device = &rdev->pdev->dev;
231 int ret; 230 int ret;
232 unsigned long tmp; 231 unsigned long tmp;
233 232
@@ -250,9 +249,9 @@ static int radeonfb_create(struct drm_fb_helper *helper,
250 rbo = gem_to_radeon_bo(gobj); 249 rbo = gem_to_radeon_bo(gobj);
251 250
252 /* okay we have an object now allocate the framebuffer */ 251 /* okay we have an object now allocate the framebuffer */
253 info = framebuffer_alloc(0, device); 252 info = drm_fb_helper_alloc_fbi(helper);
254 if (info == NULL) { 253 if (IS_ERR(info)) {
255 ret = -ENOMEM; 254 ret = PTR_ERR(info);
256 goto out_unref; 255 goto out_unref;
257 } 256 }
258 257
@@ -262,14 +261,13 @@ static int radeonfb_create(struct drm_fb_helper *helper,
262 ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); 261 ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
263 if (ret) { 262 if (ret) {
264 DRM_ERROR("failed to initialize framebuffer %d\n", ret); 263 DRM_ERROR("failed to initialize framebuffer %d\n", ret);
265 goto out_unref; 264 goto out_destroy_fbi;
266 } 265 }
267 266
268 fb = &rfbdev->rfb.base; 267 fb = &rfbdev->rfb.base;
269 268
270 /* setup helper */ 269 /* setup helper */
271 rfbdev->helper.fb = fb; 270 rfbdev->helper.fb = fb;
272 rfbdev->helper.fbdev = info;
273 271
274 memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo)); 272 memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
275 273
@@ -289,11 +287,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
289 drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); 287 drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
290 288
291 /* setup aperture base/size for vesafb takeover */ 289 /* setup aperture base/size for vesafb takeover */
292 info->apertures = alloc_apertures(1);
293 if (!info->apertures) {
294 ret = -ENOMEM;
295 goto out_unref;
296 }
297 info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; 290 info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
298 info->apertures->ranges[0].size = rdev->mc.aper_size; 291 info->apertures->ranges[0].size = rdev->mc.aper_size;
299 292
@@ -301,13 +294,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
301 294
302 if (info->screen_base == NULL) { 295 if (info->screen_base == NULL) {
303 ret = -ENOSPC; 296 ret = -ENOSPC;
304 goto out_unref; 297 goto out_destroy_fbi;
305 }
306
307 ret = fb_alloc_cmap(&info->cmap, 256, 0);
308 if (ret) {
309 ret = -ENOMEM;
310 goto out_unref;
311 } 298 }
312 299
313 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); 300 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
@@ -319,6 +306,8 @@ static int radeonfb_create(struct drm_fb_helper *helper,
319 vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); 306 vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
320 return 0; 307 return 0;
321 308
309out_destroy_fbi:
310 drm_fb_helper_release_fbi(helper);
322out_unref: 311out_unref:
323 if (rbo) { 312 if (rbo) {
324 313
@@ -339,17 +328,10 @@ void radeon_fb_output_poll_changed(struct radeon_device *rdev)
339 328
340static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) 329static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
341{ 330{
342 struct fb_info *info;
343 struct radeon_framebuffer *rfb = &rfbdev->rfb; 331 struct radeon_framebuffer *rfb = &rfbdev->rfb;
344 332
345 if (rfbdev->helper.fbdev) { 333 drm_fb_helper_unregister_fbi(&rfbdev->helper);
346 info = rfbdev->helper.fbdev; 334 drm_fb_helper_release_fbi(&rfbdev->helper);
347
348 unregister_framebuffer(info);
349 if (info->cmap.len)
350 fb_dealloc_cmap(&info->cmap);
351 framebuffer_release(info);
352 }
353 335
354 if (rfb->obj) { 336 if (rfb->obj) {
355 radeonfb_destroy_pinned_object(rfb->obj); 337 radeonfb_destroy_pinned_object(rfb->obj);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 1162bfa464f3..171d3e43c30c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
79 struct drm_mode_config *mode_config = &dev->mode_config; 79 struct drm_mode_config *mode_config = &dev->mode_config;
80 struct drm_connector *connector; 80 struct drm_connector *connector;
81 81
82 /* we can race here at startup, some boards seem to trigger
83 * hotplug irqs when they shouldn't. */
84 if (!rdev->mode_info.mode_config_initialized)
85 return;
86
82 mutex_lock(&mode_config->mutex); 87 mutex_lock(&mode_config->mutex);
83 if (mode_config->num_connector) { 88 if (mode_config->num_connector) {
84 list_for_each_entry(connector, &mode_config->connector_list, head) 89 list_for_each_entry(connector, &mode_config->connector_list, head)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 676362769b8d..d3024883b844 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -419,7 +419,6 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
419 } 419 }
420 dev_err(rdev->dev, "Userspace still has active objects !\n"); 420 dev_err(rdev->dev, "Userspace still has active objects !\n");
421 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { 421 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
422 mutex_lock(&rdev->ddev->struct_mutex);
423 dev_err(rdev->dev, "%p %p %lu %lu force free\n", 422 dev_err(rdev->dev, "%p %p %lu %lu force free\n",
424 &bo->gem_base, bo, (unsigned long)bo->gem_base.size, 423 &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
425 *((unsigned long *)&bo->gem_base.refcount)); 424 *((unsigned long *)&bo->gem_base.refcount));
@@ -427,8 +426,7 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
427 list_del_init(&bo->list); 426 list_del_init(&bo->list);
428 mutex_unlock(&bo->rdev->gem.mutex); 427 mutex_unlock(&bo->rdev->gem.mutex);
429 /* this should unref the ttm bo */ 428 /* this should unref the ttm bo */
430 drm_gem_object_unreference(&bo->gem_base); 429 drm_gem_object_unreference_unlocked(&bo->gem_base);
431 mutex_unlock(&rdev->ddev->struct_mutex);
432 } 430 }
433} 431}
434 432
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index c1ba83a8dd8c..05751f3f8444 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -253,7 +253,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
253 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) 253 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
254 return; 254 return;
255 255
256 mutex_lock(&rdev->ddev->struct_mutex);
257 down_write(&rdev->pm.mclk_lock); 256 down_write(&rdev->pm.mclk_lock);
258 mutex_lock(&rdev->ring_lock); 257 mutex_lock(&rdev->ring_lock);
259 258
@@ -268,7 +267,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
268 /* needs a GPU reset dont reset here */ 267 /* needs a GPU reset dont reset here */
269 mutex_unlock(&rdev->ring_lock); 268 mutex_unlock(&rdev->ring_lock);
270 up_write(&rdev->pm.mclk_lock); 269 up_write(&rdev->pm.mclk_lock);
271 mutex_unlock(&rdev->ddev->struct_mutex);
272 return; 270 return;
273 } 271 }
274 } 272 }
@@ -304,7 +302,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
304 302
305 mutex_unlock(&rdev->ring_lock); 303 mutex_unlock(&rdev->ring_lock);
306 up_write(&rdev->pm.mclk_lock); 304 up_write(&rdev->pm.mclk_lock);
307 mutex_unlock(&rdev->ddev->struct_mutex);
308} 305}
309 306
310static void radeon_pm_print_states(struct radeon_device *rdev) 307static void radeon_pm_print_states(struct radeon_device *rdev)
@@ -1062,7 +1059,6 @@ force:
1062 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps); 1059 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
1063 } 1060 }
1064 1061
1065 mutex_lock(&rdev->ddev->struct_mutex);
1066 down_write(&rdev->pm.mclk_lock); 1062 down_write(&rdev->pm.mclk_lock);
1067 mutex_lock(&rdev->ring_lock); 1063 mutex_lock(&rdev->ring_lock);
1068 1064
@@ -1113,7 +1109,6 @@ force:
1113done: 1109done:
1114 mutex_unlock(&rdev->ring_lock); 1110 mutex_unlock(&rdev->ring_lock);
1115 up_write(&rdev->pm.mclk_lock); 1111 up_write(&rdev->pm.mclk_lock);
1116 mutex_unlock(&rdev->ddev->struct_mutex);
1117} 1112}
1118 1113
1119void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) 1114void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 5b0dc0f6fd94..f261512bb4a0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -37,9 +37,9 @@ static int rockchip_fbdev_mmap(struct fb_info *info,
37static struct fb_ops rockchip_drm_fbdev_ops = { 37static struct fb_ops rockchip_drm_fbdev_ops = {
38 .owner = THIS_MODULE, 38 .owner = THIS_MODULE,
39 .fb_mmap = rockchip_fbdev_mmap, 39 .fb_mmap = rockchip_fbdev_mmap,
40 .fb_fillrect = cfb_fillrect, 40 .fb_fillrect = drm_fb_helper_cfb_fillrect,
41 .fb_copyarea = cfb_copyarea, 41 .fb_copyarea = drm_fb_helper_cfb_copyarea,
42 .fb_imageblit = cfb_imageblit, 42 .fb_imageblit = drm_fb_helper_cfb_imageblit,
43 .fb_check_var = drm_fb_helper_check_var, 43 .fb_check_var = drm_fb_helper_check_var,
44 .fb_set_par = drm_fb_helper_set_par, 44 .fb_set_par = drm_fb_helper_set_par,
45 .fb_blank = drm_fb_helper_blank, 45 .fb_blank = drm_fb_helper_blank,
@@ -77,10 +77,10 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
77 77
78 private->fbdev_bo = &rk_obj->base; 78 private->fbdev_bo = &rk_obj->base;
79 79
80 fbi = framebuffer_alloc(0, dev->dev); 80 fbi = drm_fb_helper_alloc_fbi(helper);
81 if (!fbi) { 81 if (IS_ERR(fbi)) {
82 dev_err(dev->dev, "Failed to allocate framebuffer info.\n"); 82 dev_err(dev->dev, "Failed to create framebuffer info.\n");
83 ret = -ENOMEM; 83 ret = PTR_ERR(fbi);
84 goto err_rockchip_gem_free_object; 84 goto err_rockchip_gem_free_object;
85 } 85 }
86 86
@@ -89,21 +89,13 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
89 if (IS_ERR(helper->fb)) { 89 if (IS_ERR(helper->fb)) {
90 dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n"); 90 dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
91 ret = PTR_ERR(helper->fb); 91 ret = PTR_ERR(helper->fb);
92 goto err_framebuffer_release; 92 goto err_release_fbi;
93 } 93 }
94 94
95 helper->fbdev = fbi;
96
97 fbi->par = helper; 95 fbi->par = helper;
98 fbi->flags = FBINFO_FLAG_DEFAULT; 96 fbi->flags = FBINFO_FLAG_DEFAULT;
99 fbi->fbops = &rockchip_drm_fbdev_ops; 97 fbi->fbops = &rockchip_drm_fbdev_ops;
100 98
101 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
102 if (ret) {
103 dev_err(dev->dev, "Failed to allocate color map.\n");
104 goto err_drm_framebuffer_unref;
105 }
106
107 fb = helper->fb; 99 fb = helper->fb;
108 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); 100 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
109 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); 101 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
@@ -124,10 +116,8 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
124 116
125 return 0; 117 return 0;
126 118
127err_drm_framebuffer_unref: 119err_release_fbi:
128 drm_framebuffer_unreference(helper->fb); 120 drm_fb_helper_release_fbi(helper);
129err_framebuffer_release:
130 framebuffer_release(fbi);
131err_rockchip_gem_free_object: 121err_rockchip_gem_free_object:
132 rockchip_gem_free_object(&rk_obj->base); 122 rockchip_gem_free_object(&rk_obj->base);
133 return ret; 123 return ret;
@@ -190,21 +180,8 @@ void rockchip_drm_fbdev_fini(struct drm_device *dev)
190 180
191 helper = &private->fbdev_helper; 181 helper = &private->fbdev_helper;
192 182
193 if (helper->fbdev) { 183 drm_fb_helper_unregister_fbi(helper);
194 struct fb_info *info; 184 drm_fb_helper_release_fbi(helper);
195 int ret;
196
197 info = helper->fbdev;
198 ret = unregister_framebuffer(info);
199 if (ret < 0)
200 DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n",
201 ret);
202
203 if (info->cmap.len)
204 fb_dealloc_cmap(&info->cmap);
205
206 framebuffer_release(info);
207 }
208 185
209 if (helper->fb) 186 if (helper->fb)
210 drm_framebuffer_unreference(helper->fb); 187 drm_framebuffer_unreference(helper->fb);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index eba5f8a52fbd..a6d9104f7f15 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -200,13 +200,10 @@ int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
200 struct drm_gem_object *obj; 200 struct drm_gem_object *obj;
201 int ret; 201 int ret;
202 202
203 mutex_lock(&dev->struct_mutex);
204
205 obj = drm_gem_object_lookup(dev, file_priv, handle); 203 obj = drm_gem_object_lookup(dev, file_priv, handle);
206 if (!obj) { 204 if (!obj) {
207 DRM_ERROR("failed to lookup gem object.\n"); 205 DRM_ERROR("failed to lookup gem object.\n");
208 ret = -EINVAL; 206 return -EINVAL;
209 goto unlock;
210 } 207 }
211 208
212 ret = drm_gem_create_mmap_offset(obj); 209 ret = drm_gem_create_mmap_offset(obj);
@@ -217,10 +214,9 @@ int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
217 DRM_DEBUG_KMS("offset = 0x%llx\n", *offset); 214 DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
218 215
219out: 216out:
220 drm_gem_object_unreference(obj); 217 drm_gem_object_unreference_unlocked(obj);
221unlock: 218
222 mutex_unlock(&dev->struct_mutex); 219 return 0;
223 return ret;
224} 220}
225 221
226/* 222/*
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index f0f1e4ee2d92..e27490b492a5 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -1,12 +1,11 @@
1sticompositor-y := \ 1sticompositor-y := \
2 sti_layer.o \
3 sti_mixer.o \ 2 sti_mixer.o \
4 sti_gdp.o \ 3 sti_gdp.o \
5 sti_vid.o \ 4 sti_vid.o \
6 sti_cursor.o \ 5 sti_cursor.o \
7 sti_compositor.o \ 6 sti_compositor.o \
8 sti_drm_crtc.o \ 7 sti_crtc.o \
9 sti_drm_plane.o 8 sti_plane.o
10 9
11stihdmi-y := sti_hdmi.o \ 10stihdmi-y := sti_hdmi.o \
12 sti_hdmi_tx3g0c55phy.o \ 11 sti_hdmi_tx3g0c55phy.o \
@@ -24,4 +23,4 @@ obj-$(CONFIG_DRM_STI) = \
24 sticompositor.o \ 23 sticompositor.o \
25 sti_hqvdp.o \ 24 sti_hqvdp.o \
26 stidvo.o \ 25 stidvo.o \
27 sti_drm_drv.o 26 sti_drv.o
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 43215d3020fb..c652627b1bca 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -14,10 +14,12 @@
14#include <drm/drmP.h> 14#include <drm/drmP.h>
15 15
16#include "sti_compositor.h" 16#include "sti_compositor.h"
17#include "sti_drm_crtc.h" 17#include "sti_crtc.h"
18#include "sti_drm_drv.h" 18#include "sti_cursor.h"
19#include "sti_drm_plane.h" 19#include "sti_drv.h"
20#include "sti_gdp.h" 20#include "sti_gdp.h"
21#include "sti_plane.h"
22#include "sti_vid.h"
21#include "sti_vtg.h" 23#include "sti_vtg.h"
22 24
23/* 25/*
@@ -31,7 +33,7 @@ struct sti_compositor_data stih407_compositor_data = {
31 {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200}, 33 {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
32 {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300}, 34 {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
33 {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400}, 35 {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
34 {STI_VID_SUBDEV, (int)STI_VID_0, 0x700}, 36 {STI_VID_SUBDEV, (int)STI_HQVDP_0, 0x700},
35 {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}, 37 {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00},
36 {STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00}, 38 {STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00},
37 }, 39 },
@@ -53,14 +55,29 @@ struct sti_compositor_data stih416_compositor_data = {
53 }, 55 },
54}; 56};
55 57
56static int sti_compositor_init_subdev(struct sti_compositor *compo, 58static int sti_compositor_bind(struct device *dev,
57 struct sti_compositor_subdev_descriptor *desc, 59 struct device *master,
58 unsigned int array_size) 60 void *data)
59{ 61{
60 unsigned int i, mixer_id = 0, layer_id = 0; 62 struct sti_compositor *compo = dev_get_drvdata(dev);
63 struct drm_device *drm_dev = data;
64 unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0;
65 struct sti_private *dev_priv = drm_dev->dev_private;
66 struct drm_plane *cursor = NULL;
67 struct drm_plane *primary = NULL;
68 struct sti_compositor_subdev_descriptor *desc = compo->data.subdev_desc;
69 unsigned int array_size = compo->data.nb_subdev;
70
71 dev_priv->compo = compo;
61 72
73 /* Register mixer subdev and video subdev first */
62 for (i = 0; i < array_size; i++) { 74 for (i = 0; i < array_size; i++) {
63 switch (desc[i].type) { 75 switch (desc[i].type) {
76 case STI_VID_SUBDEV:
77 compo->vid[vid_id++] =
78 sti_vid_create(compo->dev, desc[i].id,
79 compo->regs + desc[i].offset);
80 break;
64 case STI_MIXER_MAIN_SUBDEV: 81 case STI_MIXER_MAIN_SUBDEV:
65 case STI_MIXER_AUX_SUBDEV: 82 case STI_MIXER_AUX_SUBDEV:
66 compo->mixer[mixer_id++] = 83 compo->mixer[mixer_id++] =
@@ -68,83 +85,68 @@ static int sti_compositor_init_subdev(struct sti_compositor *compo,
68 compo->regs + desc[i].offset); 85 compo->regs + desc[i].offset);
69 break; 86 break;
70 case STI_GPD_SUBDEV: 87 case STI_GPD_SUBDEV:
71 case STI_VID_SUBDEV:
72 case STI_CURSOR_SUBDEV: 88 case STI_CURSOR_SUBDEV:
73 compo->layer[layer_id++] = 89 /* Nothing to do, wait for the second round */
74 sti_layer_create(compo->dev, desc[i].id,
75 compo->regs + desc[i].offset);
76 break; 90 break;
77 default: 91 default:
78 DRM_ERROR("Unknow subdev compoment type\n"); 92 DRM_ERROR("Unknow subdev compoment type\n");
79 return 1; 93 return 1;
80 } 94 }
81
82 } 95 }
83 compo->nb_mixers = mixer_id;
84 compo->nb_layers = layer_id;
85
86 return 0;
87}
88
89static int sti_compositor_bind(struct device *dev, struct device *master,
90 void *data)
91{
92 struct sti_compositor *compo = dev_get_drvdata(dev);
93 struct drm_device *drm_dev = data;
94 unsigned int i, crtc = 0, plane = 0;
95 struct sti_drm_private *dev_priv = drm_dev->dev_private;
96 struct drm_plane *cursor = NULL;
97 struct drm_plane *primary = NULL;
98 96
99 dev_priv->compo = compo; 97 /* Register the other subdevs, create crtc and planes */
100 98 for (i = 0; i < array_size; i++) {
101 for (i = 0; i < compo->nb_layers; i++) { 99 enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
102 if (compo->layer[i]) {
103 enum sti_layer_desc desc = compo->layer[i]->desc;
104 enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
105 enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
106 100
107 if (crtc < compo->nb_mixers) 101 if (crtc_id < mixer_id)
108 plane_type = DRM_PLANE_TYPE_PRIMARY; 102 plane_type = DRM_PLANE_TYPE_PRIMARY;
109 103
110 switch (type) { 104 switch (desc[i].type) {
111 case STI_CUR: 105 case STI_MIXER_MAIN_SUBDEV:
112 cursor = sti_drm_plane_init(drm_dev, 106 case STI_MIXER_AUX_SUBDEV:
113 compo->layer[i], 107 case STI_VID_SUBDEV:
114 1, DRM_PLANE_TYPE_CURSOR); 108 /* Nothing to do, already done at the first round */
115 break; 109 break;
116 case STI_GDP: 110 case STI_CURSOR_SUBDEV:
117 case STI_VID: 111 cursor = sti_cursor_create(drm_dev, compo->dev,
118 primary = sti_drm_plane_init(drm_dev, 112 desc[i].id,
119 compo->layer[i], 113 compo->regs + desc[i].offset,
120 (1 << compo->nb_mixers) - 1, 114 1);
121 plane_type); 115 if (!cursor) {
122 plane++; 116 DRM_ERROR("Can't create CURSOR plane\n");
123 break; 117 break;
124 case STI_BCK: 118 }
125 case STI_VDP: 119 break;
120 case STI_GPD_SUBDEV:
121 primary = sti_gdp_create(drm_dev, compo->dev,
122 desc[i].id,
123 compo->regs + desc[i].offset,
124 (1 << mixer_id) - 1,
125 plane_type);
126 if (!primary) {
127 DRM_ERROR("Can't create GDP plane\n");
126 break; 128 break;
127 } 129 }
130 break;
131 default:
 132 DRM_ERROR("Unknown subdev component type\n");
133 return 1;
134 }
128 135
 129 /* The first planes are reserved for primary planes */ 136 /* The first planes are reserved for primary planes */
130 if (crtc < compo->nb_mixers && primary) { 137 if (crtc_id < mixer_id && primary) {
131 sti_drm_crtc_init(drm_dev, compo->mixer[crtc], 138 sti_crtc_init(drm_dev, compo->mixer[crtc_id],
132 primary, cursor); 139 primary, cursor);
133 crtc++; 140 crtc_id++;
134 cursor = NULL; 141 cursor = NULL;
135 primary = NULL; 142 primary = NULL;
136 }
137 } 143 }
138 } 144 }
139 145
140 drm_vblank_init(drm_dev, crtc); 146 drm_vblank_init(drm_dev, crtc_id);
141 /* Allow usage of vblank without having to call drm_irq_install */ 147 /* Allow usage of vblank without having to call drm_irq_install */
142 drm_dev->irq_enabled = 1; 148 drm_dev->irq_enabled = 1;
143 149
144 DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n",
145 crtc, plane);
146 DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n");
147
148 return 0; 150 return 0;
149} 151}
150 152
@@ -179,7 +181,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
179 struct device_node *vtg_np; 181 struct device_node *vtg_np;
180 struct sti_compositor *compo; 182 struct sti_compositor *compo;
181 struct resource *res; 183 struct resource *res;
182 int err;
183 184
184 compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL); 185 compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
185 if (!compo) { 186 if (!compo) {
@@ -187,7 +188,7 @@ static int sti_compositor_probe(struct platform_device *pdev)
187 return -ENOMEM; 188 return -ENOMEM;
188 } 189 }
189 compo->dev = dev; 190 compo->dev = dev;
190 compo->vtg_vblank_nb.notifier_call = sti_drm_crtc_vblank_cb; 191 compo->vtg_vblank_nb.notifier_call = sti_crtc_vblank_cb;
191 192
192 /* populate data structure depending on compatibility */ 193 /* populate data structure depending on compatibility */
193 BUG_ON(!of_match_node(compositor_of_match, np)->data); 194 BUG_ON(!of_match_node(compositor_of_match, np)->data);
@@ -251,12 +252,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
251 if (vtg_np) 252 if (vtg_np)
252 compo->vtg_aux = of_vtg_find(vtg_np); 253 compo->vtg_aux = of_vtg_find(vtg_np);
253 254
254 /* Initialize compositor subdevices */
255 err = sti_compositor_init_subdev(compo, compo->data.subdev_desc,
256 compo->data.nb_subdev);
257 if (err)
258 return err;
259
260 platform_set_drvdata(pdev, compo); 255 platform_set_drvdata(pdev, compo);
261 256
262 return component_add(&pdev->dev, &sti_compositor_ops); 257 return component_add(&pdev->dev, &sti_compositor_ops);
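
The bind callback above registers subdevices in two rounds: mixers and VID first, so that mixer_id is final before any plane mask is computed, then the GDP/cursor planes and the CRTCs that pair with them. A minimal standalone sketch of that ordering, with hypothetical descriptor types standing in for the driver's structs (assumption, not the kernel API):

enum subdev_type { MIXER_SUBDEV, VID_SUBDEV, GDP_SUBDEV, CURSOR_SUBDEV };
struct subdev_desc { enum subdev_type type; };

static void bind_sketch(const struct subdev_desc *desc, unsigned int n)
{
	unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0;

	/* Round 1: mixers and video subdevs only. */
	for (i = 0; i < n; i++) {
		switch (desc[i].type) {
		case MIXER_SUBDEV: mixer_id++; break; /* sti_mixer_create() */
		case VID_SUBDEV:   vid_id++;   break; /* sti_vid_create()   */
		default: break; /* planes wait for the second round */
		}
	}

	/* Round 2: GDP/cursor planes and CRTCs; the first planes created
	 * become primaries, one per mixer, and (1u << mixer_id) - 1 is a
	 * valid possible-CRTCs mask. */
	for (i = 0; i < n; i++) {
		if (desc[i].type != GDP_SUBDEV && desc[i].type != CURSOR_SUBDEV)
			continue;
		if (crtc_id < mixer_id)
			crtc_id++; /* init a CRTC with this primary plane */
	}
}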
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 019eb44c62cc..1a4a73dab11e 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -12,13 +12,13 @@
12#include <linux/clk.h> 12#include <linux/clk.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14 14
15#include "sti_layer.h"
16#include "sti_mixer.h" 15#include "sti_mixer.h"
16#include "sti_plane.h"
17 17
18#define WAIT_NEXT_VSYNC_MS 50 /*ms*/ 18#define WAIT_NEXT_VSYNC_MS 50 /*ms*/
19 19
20#define STI_MAX_LAYER 8
21#define STI_MAX_MIXER 2 20#define STI_MAX_MIXER 2
21#define STI_MAX_VID 1
22 22
23enum sti_compositor_subdev_type { 23enum sti_compositor_subdev_type {
24 STI_MIXER_MAIN_SUBDEV, 24 STI_MIXER_MAIN_SUBDEV,
@@ -59,11 +59,9 @@ struct sti_compositor_data {
59 * @rst_main: reset control of the main path 59 * @rst_main: reset control of the main path
60 * @rst_aux: reset control of the aux path 60 * @rst_aux: reset control of the aux path
61 * @mixer: array of mixers 61 * @mixer: array of mixers
62 * @vid: array of vids
62 * @vtg_main: vtg for main data path 63 * @vtg_main: vtg for main data path
 63 * @vtg_aux: vtg for auxiliary data path 64 * @vtg_aux: vtg for auxiliary data path
64 * @layer: array of layers
65 * @nb_mixers: number of mixers for this compositor
66 * @nb_layers: number of layers (GDP,VID,...) for this compositor
67 * @vtg_vblank_nb: callback for VTG VSYNC notification 65 * @vtg_vblank_nb: callback for VTG VSYNC notification
68 */ 66 */
69struct sti_compositor { 67struct sti_compositor {
@@ -77,11 +75,9 @@ struct sti_compositor {
77 struct reset_control *rst_main; 75 struct reset_control *rst_main;
78 struct reset_control *rst_aux; 76 struct reset_control *rst_aux;
79 struct sti_mixer *mixer[STI_MAX_MIXER]; 77 struct sti_mixer *mixer[STI_MAX_MIXER];
78 struct sti_vid *vid[STI_MAX_VID];
80 struct sti_vtg *vtg_main; 79 struct sti_vtg *vtg_main;
81 struct sti_vtg *vtg_aux; 80 struct sti_vtg *vtg_aux;
82 struct sti_layer *layer[STI_MAX_LAYER];
83 int nb_mixers;
84 int nb_layers;
85 struct notifier_block vtg_vblank_nb; 81 struct notifier_block vtg_vblank_nb;
86}; 82};
87 83
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 26e63bf14efe..018ffc970e96 100644
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -15,22 +15,20 @@
15#include <drm/drm_plane_helper.h> 15#include <drm/drm_plane_helper.h>
16 16
17#include "sti_compositor.h" 17#include "sti_compositor.h"
18#include "sti_drm_drv.h" 18#include "sti_crtc.h"
19#include "sti_drm_crtc.h" 19#include "sti_drv.h"
20#include "sti_vid.h"
20#include "sti_vtg.h" 21#include "sti_vtg.h"
21 22
22static void sti_drm_crtc_dpms(struct drm_crtc *crtc, int mode) 23static void sti_crtc_enable(struct drm_crtc *crtc)
23{
24 DRM_DEBUG_KMS("\n");
25}
26
27static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
28{ 24{
29 struct sti_mixer *mixer = to_sti_mixer(crtc); 25 struct sti_mixer *mixer = to_sti_mixer(crtc);
30 struct device *dev = mixer->dev; 26 struct device *dev = mixer->dev;
31 struct sti_compositor *compo = dev_get_drvdata(dev); 27 struct sti_compositor *compo = dev_get_drvdata(dev);
32 28
33 mixer->enabled = true; 29 DRM_DEBUG_DRIVER("\n");
30
31 mixer->status = STI_MIXER_READY;
34 32
35 /* Prepare and enable the compo IP clock */ 33 /* Prepare and enable the compo IP clock */
36 if (mixer->id == STI_MIXER_MAIN) { 34 if (mixer->id == STI_MIXER_MAIN) {
@@ -41,45 +39,28 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
41 DRM_INFO("Failed to prepare/enable compo_aux clk\n"); 39 DRM_INFO("Failed to prepare/enable compo_aux clk\n");
42 } 40 }
43 41
44 sti_mixer_clear_all_layers(mixer); 42 drm_crtc_vblank_on(crtc);
45} 43}
46 44
47static void sti_drm_crtc_commit(struct drm_crtc *crtc) 45static void sti_crtc_disabling(struct drm_crtc *crtc)
48{ 46{
49 struct sti_mixer *mixer = to_sti_mixer(crtc); 47 struct sti_mixer *mixer = to_sti_mixer(crtc);
50 struct device *dev = mixer->dev;
51 struct sti_compositor *compo = dev_get_drvdata(dev);
52 struct sti_layer *layer;
53
54 if ((!mixer || !compo)) {
55 DRM_ERROR("Can not find mixer or compositor)\n");
56 return;
57 }
58 48
59 /* get GDP which is reserved to the CRTC FB */ 49 DRM_DEBUG_DRIVER("\n");
60 layer = to_sti_layer(crtc->primary);
61 if (layer)
62 sti_layer_commit(layer);
63 else
64 DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n");
65
66 /* Enable layer on mixer */
67 if (sti_mixer_set_layer_status(mixer, layer, true))
68 DRM_ERROR("Can not enable layer at mixer\n");
69 50
70 drm_crtc_vblank_on(crtc); 51 mixer->status = STI_MIXER_DISABLING;
71} 52}
72 53
73static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc, 54static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
74 const struct drm_display_mode *mode, 55 const struct drm_display_mode *mode,
75 struct drm_display_mode *adjusted_mode) 56 struct drm_display_mode *adjusted_mode)
76{ 57{
77 /* accept the provided drm_display_mode, do not fix it up */ 58 /* accept the provided drm_display_mode, do not fix it up */
78 return true; 59 return true;
79} 60}
80 61
81static int 62static int
82sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) 63sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
83{ 64{
84 struct sti_mixer *mixer = to_sti_mixer(crtc); 65 struct sti_mixer *mixer = to_sti_mixer(crtc);
85 struct device *dev = mixer->dev; 66 struct device *dev = mixer->dev;
@@ -122,22 +103,19 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
122 103
123 res = sti_mixer_active_video_area(mixer, &crtc->mode); 104 res = sti_mixer_active_video_area(mixer, &crtc->mode);
124 if (res) { 105 if (res) {
125 DRM_ERROR("Can not set active video area\n"); 106 DRM_ERROR("Can't set active video area\n");
126 return -EINVAL; 107 return -EINVAL;
127 } 108 }
128 109
129 return res; 110 return res;
130} 111}
131 112
132static void sti_drm_crtc_disable(struct drm_crtc *crtc) 113static void sti_crtc_disable(struct drm_crtc *crtc)
133{ 114{
134 struct sti_mixer *mixer = to_sti_mixer(crtc); 115 struct sti_mixer *mixer = to_sti_mixer(crtc);
135 struct device *dev = mixer->dev; 116 struct device *dev = mixer->dev;
136 struct sti_compositor *compo = dev_get_drvdata(dev); 117 struct sti_compositor *compo = dev_get_drvdata(dev);
137 118
138 if (!mixer->enabled)
139 return;
140
141 DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer)); 119 DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
142 120
143 /* Disable Background */ 121 /* Disable Background */
@@ -154,18 +132,18 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
154 clk_disable_unprepare(compo->clk_compo_aux); 132 clk_disable_unprepare(compo->clk_compo_aux);
155 } 133 }
156 134
157 mixer->enabled = false; 135 mixer->status = STI_MIXER_DISABLED;
158} 136}
159 137
160static void 138static void
161sti_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) 139sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
162{ 140{
163 sti_drm_crtc_prepare(crtc); 141 sti_crtc_enable(crtc);
164 sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode); 142 sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
165} 143}
166 144
167static void sti_drm_atomic_begin(struct drm_crtc *crtc, 145static void sti_crtc_atomic_begin(struct drm_crtc *crtc,
168 struct drm_crtc_state *old_crtc_state) 146 struct drm_crtc_state *old_crtc_state)
169{ 147{
170 struct sti_mixer *mixer = to_sti_mixer(crtc); 148 struct sti_mixer *mixer = to_sti_mixer(crtc);
171 149
@@ -179,47 +157,109 @@ static void sti_drm_atomic_begin(struct drm_crtc *crtc,
179 } 157 }
180} 158}
181 159
182static void sti_drm_atomic_flush(struct drm_crtc *crtc, 160static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
183 struct drm_crtc_state *old_crtc_state) 161 struct drm_crtc_state *old_crtc_state)
184{ 162{
163 struct drm_device *drm_dev = crtc->dev;
164 struct sti_mixer *mixer = to_sti_mixer(crtc);
165 struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
166 struct drm_plane *p;
167
168 DRM_DEBUG_DRIVER("\n");
169
170 /* perform plane actions */
171 list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
172 struct sti_plane *plane = to_sti_plane(p);
173
174 switch (plane->status) {
175 case STI_PLANE_UPDATED:
 176 /* update planes tagged as updated */
177 DRM_DEBUG_DRIVER("update plane %s\n",
178 sti_plane_to_str(plane));
179
180 if (sti_mixer_set_plane_depth(mixer, plane)) {
181 DRM_ERROR("Cannot set plane %s depth\n",
182 sti_plane_to_str(plane));
183 break;
184 }
185
186 if (sti_mixer_set_plane_status(mixer, plane, true)) {
187 DRM_ERROR("Cannot enable plane %s at mixer\n",
188 sti_plane_to_str(plane));
189 break;
190 }
191
192 /* if plane is HQVDP_0 then commit the vid[0] */
193 if (plane->desc == STI_HQVDP_0)
194 sti_vid_commit(compo->vid[0], p->state);
195
196 plane->status = STI_PLANE_READY;
197
198 break;
199 case STI_PLANE_DISABLING:
 200 /* disabling sequence for planes tagged as disabling */
201 DRM_DEBUG_DRIVER("disable plane %s from mixer\n",
202 sti_plane_to_str(plane));
203
204 if (sti_mixer_set_plane_status(mixer, plane, false)) {
205 DRM_ERROR("Cannot disable plane %s at mixer\n",
206 sti_plane_to_str(plane));
207 continue;
208 }
209
210 if (plane->desc == STI_CURSOR)
 211 /* tag plane status as disabled */
212 plane->status = STI_PLANE_DISABLED;
213 else
 214 /* tag plane status as flushing */
215 plane->status = STI_PLANE_FLUSHING;
216
217 /* if plane is HQVDP_0 then disable the vid[0] */
218 if (plane->desc == STI_HQVDP_0)
219 sti_vid_disable(compo->vid[0]);
220
221 break;
222 default:
 223 /* Other status cases are not handled */
224 break;
225 }
226 }
185} 227}
186 228
187static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { 229static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
188 .dpms = sti_drm_crtc_dpms, 230 .enable = sti_crtc_enable,
189 .prepare = sti_drm_crtc_prepare, 231 .disable = sti_crtc_disabling,
190 .commit = sti_drm_crtc_commit, 232 .mode_fixup = sti_crtc_mode_fixup,
191 .mode_fixup = sti_drm_crtc_mode_fixup,
192 .mode_set = drm_helper_crtc_mode_set, 233 .mode_set = drm_helper_crtc_mode_set,
193 .mode_set_nofb = sti_drm_crtc_mode_set_nofb, 234 .mode_set_nofb = sti_crtc_mode_set_nofb,
194 .mode_set_base = drm_helper_crtc_mode_set_base, 235 .mode_set_base = drm_helper_crtc_mode_set_base,
195 .disable = sti_drm_crtc_disable, 236 .atomic_begin = sti_crtc_atomic_begin,
196 .atomic_begin = sti_drm_atomic_begin, 237 .atomic_flush = sti_crtc_atomic_flush,
197 .atomic_flush = sti_drm_atomic_flush,
198}; 238};
199 239
200static void sti_drm_crtc_destroy(struct drm_crtc *crtc) 240static void sti_crtc_destroy(struct drm_crtc *crtc)
201{ 241{
202 DRM_DEBUG_KMS("\n"); 242 DRM_DEBUG_KMS("\n");
203 drm_crtc_cleanup(crtc); 243 drm_crtc_cleanup(crtc);
204} 244}
205 245
206static int sti_drm_crtc_set_property(struct drm_crtc *crtc, 246static int sti_crtc_set_property(struct drm_crtc *crtc,
207 struct drm_property *property, 247 struct drm_property *property,
208 uint64_t val) 248 uint64_t val)
209{ 249{
210 DRM_DEBUG_KMS("\n"); 250 DRM_DEBUG_KMS("\n");
211 return 0; 251 return 0;
212} 252}
213 253
214int sti_drm_crtc_vblank_cb(struct notifier_block *nb, 254int sti_crtc_vblank_cb(struct notifier_block *nb,
215 unsigned long event, void *data) 255 unsigned long event, void *data)
216{ 256{
217 struct drm_device *drm_dev; 257 struct drm_device *drm_dev;
218 struct sti_compositor *compo = 258 struct sti_compositor *compo =
219 container_of(nb, struct sti_compositor, vtg_vblank_nb); 259 container_of(nb, struct sti_compositor, vtg_vblank_nb);
220 int *crtc = data; 260 int *crtc = data;
221 unsigned long flags; 261 unsigned long flags;
222 struct sti_drm_private *priv; 262 struct sti_private *priv;
223 263
224 drm_dev = compo->mixer[*crtc]->drm_crtc.dev; 264 drm_dev = compo->mixer[*crtc]->drm_crtc.dev;
225 priv = drm_dev->dev_private; 265 priv = drm_dev->dev_private;
@@ -235,21 +275,38 @@ int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
235 spin_lock_irqsave(&drm_dev->event_lock, flags); 275 spin_lock_irqsave(&drm_dev->event_lock, flags);
236 if (compo->mixer[*crtc]->pending_event) { 276 if (compo->mixer[*crtc]->pending_event) {
237 drm_send_vblank_event(drm_dev, -1, 277 drm_send_vblank_event(drm_dev, -1,
238 compo->mixer[*crtc]->pending_event); 278 compo->mixer[*crtc]->pending_event);
239 drm_vblank_put(drm_dev, *crtc); 279 drm_vblank_put(drm_dev, *crtc);
240 compo->mixer[*crtc]->pending_event = NULL; 280 compo->mixer[*crtc]->pending_event = NULL;
241 } 281 }
242 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 282 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
243 283
284 if (compo->mixer[*crtc]->status == STI_MIXER_DISABLING) {
285 struct drm_plane *p;
286
287 /* Disable mixer only if all overlay planes (GDP and VDP)
288 * are disabled */
289 list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
290 struct sti_plane *plane = to_sti_plane(p);
291
292 if ((plane->desc & STI_PLANE_TYPE_MASK) <= STI_VDP)
293 if (plane->status != STI_PLANE_DISABLED)
294 return 0;
295 }
296 sti_crtc_disable(&compo->mixer[*crtc]->drm_crtc);
297 }
298
244 return 0; 299 return 0;
245} 300}
246 301
247int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc) 302int sti_crtc_enable_vblank(struct drm_device *dev, int crtc)
248{ 303{
249 struct sti_drm_private *dev_priv = dev->dev_private; 304 struct sti_private *dev_priv = dev->dev_private;
250 struct sti_compositor *compo = dev_priv->compo; 305 struct sti_compositor *compo = dev_priv->compo;
251 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb; 306 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
252 307
308 DRM_DEBUG_DRIVER("\n");
309
253 if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ? 310 if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
254 compo->vtg_main : compo->vtg_aux, 311 compo->vtg_main : compo->vtg_aux,
255 vtg_vblank_nb, crtc)) { 312 vtg_vblank_nb, crtc)) {
@@ -259,11 +316,11 @@ int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
259 316
260 return 0; 317 return 0;
261} 318}
262EXPORT_SYMBOL(sti_drm_crtc_enable_vblank); 319EXPORT_SYMBOL(sti_crtc_enable_vblank);
263 320
264void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc) 321void sti_crtc_disable_vblank(struct drm_device *drm_dev, int crtc)
265{ 322{
266 struct sti_drm_private *priv = dev->dev_private; 323 struct sti_private *priv = drm_dev->dev_private;
267 struct sti_compositor *compo = priv->compo; 324 struct sti_compositor *compo = priv->compo;
268 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb; 325 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
269 326
@@ -275,23 +332,23 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
275 332
276 /* free the resources of the pending requests */ 333 /* free the resources of the pending requests */
277 if (compo->mixer[crtc]->pending_event) { 334 if (compo->mixer[crtc]->pending_event) {
278 drm_vblank_put(dev, crtc); 335 drm_vblank_put(drm_dev, crtc);
279 compo->mixer[crtc]->pending_event = NULL; 336 compo->mixer[crtc]->pending_event = NULL;
280 } 337 }
281} 338}
282EXPORT_SYMBOL(sti_drm_crtc_disable_vblank); 339EXPORT_SYMBOL(sti_crtc_disable_vblank);
283 340
284static struct drm_crtc_funcs sti_crtc_funcs = { 341static struct drm_crtc_funcs sti_crtc_funcs = {
285 .set_config = drm_atomic_helper_set_config, 342 .set_config = drm_atomic_helper_set_config,
286 .page_flip = drm_atomic_helper_page_flip, 343 .page_flip = drm_atomic_helper_page_flip,
287 .destroy = sti_drm_crtc_destroy, 344 .destroy = sti_crtc_destroy,
288 .set_property = sti_drm_crtc_set_property, 345 .set_property = sti_crtc_set_property,
289 .reset = drm_atomic_helper_crtc_reset, 346 .reset = drm_atomic_helper_crtc_reset,
290 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 347 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
291 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 348 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
292}; 349};
293 350
294bool sti_drm_crtc_is_main(struct drm_crtc *crtc) 351bool sti_crtc_is_main(struct drm_crtc *crtc)
295{ 352{
296 struct sti_mixer *mixer = to_sti_mixer(crtc); 353 struct sti_mixer *mixer = to_sti_mixer(crtc);
297 354
@@ -300,18 +357,18 @@ bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
300 357
301 return false; 358 return false;
302} 359}
303EXPORT_SYMBOL(sti_drm_crtc_is_main); 360EXPORT_SYMBOL(sti_crtc_is_main);
304 361
305int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, 362int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
306 struct drm_plane *primary, struct drm_plane *cursor) 363 struct drm_plane *primary, struct drm_plane *cursor)
307{ 364{
308 struct drm_crtc *crtc = &mixer->drm_crtc; 365 struct drm_crtc *crtc = &mixer->drm_crtc;
309 int res; 366 int res;
310 367
311 res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, 368 res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
312 &sti_crtc_funcs); 369 &sti_crtc_funcs);
313 if (res) { 370 if (res) {
 314 DRM_ERROR("Can not initialize CRTC\n"); 371 DRM_ERROR("Can't initialize CRTC\n");
315 return -EINVAL; 372 return -EINVAL;
316 } 373 }
317 374
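
The rewrite above replaces the old prepare/commit pair with a per-plane status machine: an atomic update tags a plane STI_PLANE_UPDATED, atomic_flush promotes it to STI_PLANE_READY once the mixer accepts it, an atomic disable tags it STI_PLANE_DISABLING, and the vblank callback finishes the DISABLING -> FLUSHING -> DISABLED sequence before turning the mixer off. A compilable sketch of the flush-side transitions, with the mixer call reduced to a stub (assumption):

#include <stdbool.h>

enum plane_status_sketch {
	PLANE_DISABLED, PLANE_UPDATED, PLANE_READY,
	PLANE_DISABLING, PLANE_FLUSHING,
};

/* Stub for sti_mixer_set_plane_status(); always succeeds here. */
static bool mixer_set_status(bool enable) { (void)enable; return true; }

/* What sti_crtc_atomic_flush() does for one plane. */
static enum plane_status_sketch flush_one(enum plane_status_sketch s)
{
	switch (s) {
	case PLANE_UPDATED:
		return mixer_set_status(true) ? PLANE_READY : s;
	case PLANE_DISABLING:
		if (!mixer_set_status(false))
			return s;
		/* cursor planes skip FLUSHING and go straight to DISABLED */
		return PLANE_FLUSHING;
	default:
		return s; /* other states are left untouched */
	}
}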
diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h
new file mode 100644
index 000000000000..51963e6ddbe7
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_crtc.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_CRTC_H_
8#define _STI_CRTC_H_
9
10#include <drm/drmP.h>
11
12struct sti_mixer;
13
14int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
15 struct drm_plane *primary, struct drm_plane *cursor);
16int sti_crtc_enable_vblank(struct drm_device *dev, int crtc);
17void sti_crtc_disable_vblank(struct drm_device *dev, int crtc);
18int sti_crtc_vblank_cb(struct notifier_block *nb,
19 unsigned long event, void *data);
20bool sti_crtc_is_main(struct drm_crtc *drm_crtc);
21
22#endif
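
The header keeps vblank delivery on a notifier chain: sti_crtc_enable_vblank() registers sti_crtc_vblank_cb() with the VTG, which then calls every registered block on each vsync. A minimal single-threaded model of that pattern (assumption: no locking, unlike the kernel's notifier API):

struct nb_sketch {
	int (*call)(struct nb_sketch *nb, unsigned long event, void *data);
	struct nb_sketch *next;
};

static struct nb_sketch *vsync_chain;

static void nb_register(struct nb_sketch *n)
{
	n->next = vsync_chain;
	vsync_chain = n;
}

static void nb_notify(unsigned long event, void *data)
{
	struct nb_sketch *n;

	for (n = vsync_chain; n; n = n->next)
		n->call(n, event, data); /* e.g. sti_crtc_vblank_cb() */
}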
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 010eaee60bf7..dd1032195051 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -7,8 +7,14 @@
7 */ 7 */
8#include <drm/drmP.h> 8#include <drm/drmP.h>
9 9
10#include <drm/drm_atomic_helper.h>
11#include <drm/drm_fb_cma_helper.h>
12#include <drm/drm_gem_cma_helper.h>
13#include <drm/drm_plane_helper.h>
14
15#include "sti_compositor.h"
10#include "sti_cursor.h" 16#include "sti_cursor.h"
11#include "sti_layer.h" 17#include "sti_plane.h"
12#include "sti_vtg.h" 18#include "sti_vtg.h"
13 19
14/* Registers */ 20/* Registers */
@@ -42,15 +48,19 @@ struct dma_pixmap {
42/** 48/**
43 * STI Cursor structure 49 * STI Cursor structure
44 * 50 *
45 * @layer: layer structure 51 * @sti_plane: sti_plane structure
46 * @width: cursor width 52 * @dev: driver device
47 * @height: cursor height 53 * @regs: cursor registers
48 * @clut: color look up table 54 * @width: cursor width
49 * @clut_paddr: color look up table physical address 55 * @height: cursor height
50 * @pixmap: pixmap dma buffer (clut8-format cursor) 56 * @clut: color look up table
57 * @clut_paddr: color look up table physical address
58 * @pixmap: pixmap dma buffer (clut8-format cursor)
51 */ 59 */
52struct sti_cursor { 60struct sti_cursor {
53 struct sti_layer layer; 61 struct sti_plane plane;
62 struct device *dev;
63 void __iomem *regs;
54 unsigned int width; 64 unsigned int width;
55 unsigned int height; 65 unsigned int height;
56 unsigned short *clut; 66 unsigned short *clut;
@@ -62,22 +72,10 @@ static const uint32_t cursor_supported_formats[] = {
62 DRM_FORMAT_ARGB8888, 72 DRM_FORMAT_ARGB8888,
63}; 73};
64 74
65#define to_sti_cursor(x) container_of(x, struct sti_cursor, layer) 75#define to_sti_cursor(x) container_of(x, struct sti_cursor, plane)
66
67static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer)
68{
69 return cursor_supported_formats;
70}
71
72static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer)
73{
74 return ARRAY_SIZE(cursor_supported_formats);
75}
76 76
77static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer) 77static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
78{ 78{
79 struct sti_cursor *cursor = to_sti_cursor(layer);
80 u32 *src = layer->vaddr;
81 u8 *dst = cursor->pixmap.base; 79 u8 *dst = cursor->pixmap.base;
82 unsigned int i, j; 80 unsigned int i, j;
83 u32 a, r, g, b; 81 u32 a, r, g, b;
@@ -96,127 +94,155 @@ static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
96 } 94 }
97} 95}
98 96
99static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare) 97static void sti_cursor_init(struct sti_cursor *cursor)
100{ 98{
101 struct sti_cursor *cursor = to_sti_cursor(layer); 99 unsigned short *base = cursor->clut;
102 struct drm_display_mode *mode = layer->mode; 100 unsigned int a, r, g, b;
101
102 /* Assign CLUT values, ARGB444 format */
103 for (a = 0; a < 4; a++)
104 for (r = 0; r < 4; r++)
105 for (g = 0; g < 4; g++)
106 for (b = 0; b < 4; b++)
107 *base++ = (a * 5) << 12 |
108 (r * 5) << 8 |
109 (g * 5) << 4 |
110 (b * 5);
111}
112
113static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
114 struct drm_plane_state *oldstate)
115{
116 struct drm_plane_state *state = drm_plane->state;
117 struct sti_plane *plane = to_sti_plane(drm_plane);
118 struct sti_cursor *cursor = to_sti_cursor(plane);
119 struct drm_crtc *crtc = state->crtc;
120 struct sti_mixer *mixer = to_sti_mixer(crtc);
121 struct drm_framebuffer *fb = state->fb;
122 struct drm_display_mode *mode = &crtc->mode;
123 int dst_x = state->crtc_x;
124 int dst_y = state->crtc_y;
125 int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
126 int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
 127 /* src coordinates are in 16.16 fixed-point format */
128 int src_w = state->src_w >> 16;
129 int src_h = state->src_h >> 16;
130 bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
131 struct drm_gem_cma_object *cma_obj;
103 u32 y, x; 132 u32 y, x;
104 u32 val; 133 u32 val;
105 134
106 DRM_DEBUG_DRIVER("\n"); 135 DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
136 crtc->base.id, sti_mixer_to_str(mixer),
137 drm_plane->base.id, sti_plane_to_str(plane));
138 DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
107 139
108 dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); 140 dev_dbg(cursor->dev, "%s %s\n", __func__,
141 sti_plane_to_str(plane));
109 142
110 if (layer->src_w < STI_CURS_MIN_SIZE || 143 if (src_w < STI_CURS_MIN_SIZE ||
111 layer->src_h < STI_CURS_MIN_SIZE || 144 src_h < STI_CURS_MIN_SIZE ||
112 layer->src_w > STI_CURS_MAX_SIZE || 145 src_w > STI_CURS_MAX_SIZE ||
113 layer->src_h > STI_CURS_MAX_SIZE) { 146 src_h > STI_CURS_MAX_SIZE) {
114 DRM_ERROR("Invalid cursor size (%dx%d)\n", 147 DRM_ERROR("Invalid cursor size (%dx%d)\n",
115 layer->src_w, layer->src_h); 148 src_w, src_h);
116 return -EINVAL; 149 return;
117 } 150 }
118 151
 119 /* If the cursor size has changed, re-allocate the pixmap */ 152 /* If the cursor size has changed, re-allocate the pixmap */
120 if (!cursor->pixmap.base || 153 if (!cursor->pixmap.base ||
121 (cursor->width != layer->src_w) || 154 (cursor->width != src_w) ||
122 (cursor->height != layer->src_h)) { 155 (cursor->height != src_h)) {
123 cursor->width = layer->src_w; 156 cursor->width = src_w;
124 cursor->height = layer->src_h; 157 cursor->height = src_h;
125 158
126 if (cursor->pixmap.base) 159 if (cursor->pixmap.base)
127 dma_free_writecombine(layer->dev, 160 dma_free_writecombine(cursor->dev,
128 cursor->pixmap.size, 161 cursor->pixmap.size,
129 cursor->pixmap.base, 162 cursor->pixmap.base,
130 cursor->pixmap.paddr); 163 cursor->pixmap.paddr);
131 164
132 cursor->pixmap.size = cursor->width * cursor->height; 165 cursor->pixmap.size = cursor->width * cursor->height;
133 166
134 cursor->pixmap.base = dma_alloc_writecombine(layer->dev, 167 cursor->pixmap.base = dma_alloc_writecombine(cursor->dev,
135 cursor->pixmap.size, 168 cursor->pixmap.size,
136 &cursor->pixmap.paddr, 169 &cursor->pixmap.paddr,
137 GFP_KERNEL | GFP_DMA); 170 GFP_KERNEL | GFP_DMA);
138 if (!cursor->pixmap.base) { 171 if (!cursor->pixmap.base) {
139 DRM_ERROR("Failed to allocate memory for pixmap\n"); 172 DRM_ERROR("Failed to allocate memory for pixmap\n");
140 return -ENOMEM; 173 return;
141 } 174 }
142 } 175 }
143 176
177 cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
178 if (!cma_obj) {
179 DRM_ERROR("Can't get CMA GEM object for fb\n");
180 return;
181 }
182
144 /* Convert ARGB8888 to CLUT8 */ 183 /* Convert ARGB8888 to CLUT8 */
145 sti_cursor_argb8888_to_clut8(layer); 184 sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
146 185
147 /* AWS and AWE depend on the mode */ 186 /* AWS and AWE depend on the mode */
148 y = sti_vtg_get_line_number(*mode, 0); 187 y = sti_vtg_get_line_number(*mode, 0);
149 x = sti_vtg_get_pixel_number(*mode, 0); 188 x = sti_vtg_get_pixel_number(*mode, 0);
150 val = y << 16 | x; 189 val = y << 16 | x;
151 writel(val, layer->regs + CUR_AWS); 190 writel(val, cursor->regs + CUR_AWS);
152 y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1); 191 y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
153 x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1); 192 x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
154 val = y << 16 | x; 193 val = y << 16 | x;
155 writel(val, layer->regs + CUR_AWE); 194 writel(val, cursor->regs + CUR_AWE);
156 195
157 if (first_prepare) { 196 if (first_prepare) {
158 /* Set and fetch CLUT */ 197 /* Set and fetch CLUT */
159 writel(cursor->clut_paddr, layer->regs + CUR_CML); 198 writel(cursor->clut_paddr, cursor->regs + CUR_CML);
160 writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL); 199 writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
161 } 200 }
162 201
163 return 0;
164}
165
166static int sti_cursor_commit_layer(struct sti_layer *layer)
167{
168 struct sti_cursor *cursor = to_sti_cursor(layer);
169 struct drm_display_mode *mode = layer->mode;
170 u32 ydo, xdo;
171
172 dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
173
174 /* Set memory location, size, and position */ 202 /* Set memory location, size, and position */
175 writel(cursor->pixmap.paddr, layer->regs + CUR_PML); 203 writel(cursor->pixmap.paddr, cursor->regs + CUR_PML);
176 writel(cursor->width, layer->regs + CUR_PMP); 204 writel(cursor->width, cursor->regs + CUR_PMP);
177 writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE); 205 writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE);
178 206
179 ydo = sti_vtg_get_line_number(*mode, layer->dst_y); 207 y = sti_vtg_get_line_number(*mode, dst_y);
180 xdo = sti_vtg_get_pixel_number(*mode, layer->dst_y); 208 x = sti_vtg_get_pixel_number(*mode, dst_y);
181 writel((ydo << 16) | xdo, layer->regs + CUR_VPO); 209 writel((y << 16) | x, cursor->regs + CUR_VPO);
182 210
183 return 0; 211 plane->status = STI_PLANE_UPDATED;
184} 212}
185 213
186static int sti_cursor_disable_layer(struct sti_layer *layer) 214static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
215 struct drm_plane_state *oldstate)
187{ 216{
188 return 0; 217 struct sti_plane *plane = to_sti_plane(drm_plane);
189} 218 struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
190 219
191static void sti_cursor_init(struct sti_layer *layer) 220 if (!drm_plane->crtc) {
192{ 221 DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
193 struct sti_cursor *cursor = to_sti_cursor(layer); 222 drm_plane->base.id);
194 unsigned short *base = cursor->clut; 223 return;
195 unsigned int a, r, g, b; 224 }
196 225
197 /* Assign CLUT values, ARGB444 format */ 226 DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
198 for (a = 0; a < 4; a++) 227 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
199 for (r = 0; r < 4; r++) 228 drm_plane->base.id, sti_plane_to_str(plane));
200 for (g = 0; g < 4; g++) 229
201 for (b = 0; b < 4; b++) 230 plane->status = STI_PLANE_DISABLING;
202 *base++ = (a * 5) << 12 |
203 (r * 5) << 8 |
204 (g * 5) << 4 |
205 (b * 5);
206} 231}
207 232
208static const struct sti_layer_funcs cursor_ops = { 233static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
209 .get_formats = sti_cursor_get_formats, 234 .atomic_update = sti_cursor_atomic_update,
210 .get_nb_formats = sti_cursor_get_nb_formats, 235 .atomic_disable = sti_cursor_atomic_disable,
211 .init = sti_cursor_init,
212 .prepare = sti_cursor_prepare_layer,
213 .commit = sti_cursor_commit_layer,
214 .disable = sti_cursor_disable_layer,
215}; 236};
216 237
217struct sti_layer *sti_cursor_create(struct device *dev) 238struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
239 struct device *dev, int desc,
240 void __iomem *baseaddr,
241 unsigned int possible_crtcs)
218{ 242{
219 struct sti_cursor *cursor; 243 struct sti_cursor *cursor;
244 size_t size;
245 int res;
220 246
221 cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL); 247 cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL);
222 if (!cursor) { 248 if (!cursor) {
@@ -225,18 +251,43 @@ struct sti_layer *sti_cursor_create(struct device *dev)
225 } 251 }
226 252
227 /* Allocate clut buffer */ 253 /* Allocate clut buffer */
228 cursor->clut = dma_alloc_writecombine(dev, 254 size = 0x100 * sizeof(unsigned short);
229 0x100 * sizeof(unsigned short), 255 cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr,
230 &cursor->clut_paddr, 256 GFP_KERNEL | GFP_DMA);
231 GFP_KERNEL | GFP_DMA);
232 257
233 if (!cursor->clut) { 258 if (!cursor->clut) {
234 DRM_ERROR("Failed to allocate memory for cursor clut\n"); 259 DRM_ERROR("Failed to allocate memory for cursor clut\n");
235 devm_kfree(dev, cursor); 260 goto err_clut;
236 return NULL; 261 }
262
263 cursor->dev = dev;
264 cursor->regs = baseaddr;
265 cursor->plane.desc = desc;
266 cursor->plane.status = STI_PLANE_DISABLED;
267
268 sti_cursor_init(cursor);
269
270 res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane,
271 possible_crtcs,
272 &sti_plane_helpers_funcs,
273 cursor_supported_formats,
274 ARRAY_SIZE(cursor_supported_formats),
275 DRM_PLANE_TYPE_CURSOR);
276 if (res) {
277 DRM_ERROR("Failed to initialize universal plane\n");
278 goto err_plane;
237 } 279 }
238 280
239 cursor->layer.ops = &cursor_ops; 281 drm_plane_helper_add(&cursor->plane.drm_plane,
282 &sti_cursor_helpers_funcs);
283
284 sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
285
286 return &cursor->plane.drm_plane;
240 287
241 return (struct sti_layer *)cursor; 288err_plane:
289 dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr);
290err_clut:
291 devm_kfree(dev, cursor);
292 return NULL;
242} 293}
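
The cursor plane stores its image as CLUT8 against a 256-entry ARGB4444 table: two bits per ARGB channel select the entry, and each 2-bit level 0..3 expands to the 4-bit value level * 5 (0, 5, 10, 15), exactly the fill loop in sti_cursor_init() above. The conversion loop itself is elided from this hunk, so the index packing below is an assumption consistent with the table's a/r/g/b fill order:

#include <stdint.h>

/* Build the 256-entry ARGB4444 CLUT, as in sti_cursor_init(). */
static void clut_init_sketch(uint16_t clut[256])
{
	unsigned int a, r, g, b, i = 0;

	for (a = 0; a < 4; a++)
		for (r = 0; r < 4; r++)
			for (g = 0; g < 4; g++)
				for (b = 0; b < 4; b++)
					clut[i++] = (a * 5) << 12 | (r * 5) << 8 |
						    (g * 5) << 4 | (b * 5);
}

/* One ARGB8888 pixel -> CLUT8 index: keep the top two bits of each
 * channel, packed in the same a|r|g|b order the table was filled. */
static uint8_t argb8888_to_clut8_sketch(uint32_t px)
{
	return ((px >> 30) & 3) << 6 |	/* alpha */
	       ((px >> 22) & 3) << 4 |	/* red   */
	       ((px >> 14) & 3) << 2 |	/* green */
	       ((px >> 6) & 3);		/* blue  */
}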
diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h
index 3c9827404f27..2ee5c10e8b33 100644
--- a/drivers/gpu/drm/sti/sti_cursor.h
+++ b/drivers/gpu/drm/sti/sti_cursor.h
@@ -7,6 +7,9 @@
7#ifndef _STI_CURSOR_H_ 7#ifndef _STI_CURSOR_H_
8#define _STI_CURSOR_H_ 8#define _STI_CURSOR_H_
9 9
10struct sti_layer *sti_cursor_create(struct device *dev); 10struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
11 struct device *dev, int desc,
12 void __iomem *baseaddr,
13 unsigned int possible_crtcs);
11 14
12#endif 15#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.h b/drivers/gpu/drm/sti/sti_drm_crtc.h
deleted file mode 100644
index caca8b14f017..000000000000
--- a/drivers/gpu/drm/sti/sti_drm_crtc.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_DRM_CRTC_H_
8#define _STI_DRM_CRTC_H_
9
10#include <drm/drmP.h>
11
12struct sti_mixer;
13
14int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
15 struct drm_plane *primary, struct drm_plane *cursor);
16int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
17void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
18int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
19 unsigned long event, void *data);
20bool sti_drm_crtc_is_main(struct drm_crtc *drm_crtc);
21
22#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
deleted file mode 100644
index 64d4ed43dda3..000000000000
--- a/drivers/gpu/drm/sti/sti_drm_plane.c
+++ /dev/null
@@ -1,251 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#include <drm/drmP.h>
10#include <drm/drm_atomic_helper.h>
11#include <drm/drm_plane_helper.h>
12
13#include "sti_compositor.h"
14#include "sti_drm_drv.h"
15#include "sti_drm_plane.h"
16#include "sti_vtg.h"
17
18enum sti_layer_desc sti_layer_default_zorder[] = {
19 STI_GDP_0,
20 STI_VID_0,
21 STI_GDP_1,
22 STI_VID_1,
23 STI_GDP_2,
24 STI_GDP_3,
25};
26
27/* (Background) < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3 < (ForeGround) */
28
29static int
30sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
31 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
32 unsigned int crtc_w, unsigned int crtc_h,
33 uint32_t src_x, uint32_t src_y,
34 uint32_t src_w, uint32_t src_h)
35{
36 struct sti_layer *layer = to_sti_layer(plane);
37 struct sti_mixer *mixer = to_sti_mixer(crtc);
38 int res;
39
40 DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
41 crtc->base.id, sti_mixer_to_str(mixer),
42 plane->base.id, sti_layer_to_str(layer));
43 DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y);
44
45 res = sti_mixer_set_layer_depth(mixer, layer);
46 if (res) {
47 DRM_ERROR("Can not set layer depth\n");
48 return res;
49 }
50
51 /* src_x are in 16.16 format. */
52 res = sti_layer_prepare(layer, crtc, fb,
53 &crtc->mode, mixer->id,
54 crtc_x, crtc_y, crtc_w, crtc_h,
55 src_x >> 16, src_y >> 16,
56 src_w >> 16, src_h >> 16);
57 if (res) {
58 DRM_ERROR("Layer prepare failed\n");
59 return res;
60 }
61
62 res = sti_layer_commit(layer);
63 if (res) {
64 DRM_ERROR("Layer commit failed\n");
65 return res;
66 }
67
68 res = sti_mixer_set_layer_status(mixer, layer, true);
69 if (res) {
70 DRM_ERROR("Can not enable layer at mixer\n");
71 return res;
72 }
73
74 return 0;
75}
76
77static int sti_drm_disable_plane(struct drm_plane *plane)
78{
79 struct sti_layer *layer;
80 struct sti_mixer *mixer;
81 int lay_res, mix_res;
82
83 if (!plane->crtc) {
84 DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id);
85 return 0;
86 }
87 layer = to_sti_layer(plane);
88 mixer = to_sti_mixer(plane->crtc);
89
90 DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
91 plane->crtc->base.id, sti_mixer_to_str(mixer),
92 plane->base.id, sti_layer_to_str(layer));
93
94 /* Disable layer at mixer level */
95 mix_res = sti_mixer_set_layer_status(mixer, layer, false);
96 if (mix_res)
97 DRM_ERROR("Can not disable layer at mixer\n");
98
99 /* Wait a while to be sure that a Vsync event is received */
100 msleep(WAIT_NEXT_VSYNC_MS);
101
102 /* Then disable layer itself */
103 lay_res = sti_layer_disable(layer);
104 if (lay_res)
105 DRM_ERROR("Layer disable failed\n");
106
107 if (lay_res || mix_res)
108 return -EINVAL;
109
110 return 0;
111}
112
113static void sti_drm_plane_destroy(struct drm_plane *plane)
114{
115 DRM_DEBUG_DRIVER("\n");
116
117 drm_plane_helper_disable(plane);
118 drm_plane_cleanup(plane);
119}
120
121static int sti_drm_plane_set_property(struct drm_plane *plane,
122 struct drm_property *property,
123 uint64_t val)
124{
125 struct drm_device *dev = plane->dev;
126 struct sti_drm_private *private = dev->dev_private;
127 struct sti_layer *layer = to_sti_layer(plane);
128
129 DRM_DEBUG_DRIVER("\n");
130
131 if (property == private->plane_zorder_property) {
132 layer->zorder = val;
133 return 0;
134 }
135
136 return -EINVAL;
137}
138
139static struct drm_plane_funcs sti_drm_plane_funcs = {
140 .update_plane = drm_atomic_helper_update_plane,
141 .disable_plane = drm_atomic_helper_disable_plane,
142 .destroy = sti_drm_plane_destroy,
143 .set_property = sti_drm_plane_set_property,
144 .reset = drm_atomic_helper_plane_reset,
145 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
146 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
147};
148
149static int sti_drm_plane_prepare_fb(struct drm_plane *plane,
150 struct drm_framebuffer *fb,
151 const struct drm_plane_state *new_state)
152{
153 return 0;
154}
155
156static void sti_drm_plane_cleanup_fb(struct drm_plane *plane,
157 struct drm_framebuffer *fb,
158 const struct drm_plane_state *old_fb)
159{
160}
161
162static int sti_drm_plane_atomic_check(struct drm_plane *plane,
163 struct drm_plane_state *state)
164{
165 return 0;
166}
167
168static void sti_drm_plane_atomic_update(struct drm_plane *plane,
169 struct drm_plane_state *oldstate)
170{
171 struct drm_plane_state *state = plane->state;
172
173 sti_drm_update_plane(plane, state->crtc, state->fb,
174 state->crtc_x, state->crtc_y,
175 state->crtc_w, state->crtc_h,
176 state->src_x, state->src_y,
177 state->src_w, state->src_h);
178}
179
180static void sti_drm_plane_atomic_disable(struct drm_plane *plane,
181 struct drm_plane_state *oldstate)
182{
183 sti_drm_disable_plane(plane);
184}
185
186static const struct drm_plane_helper_funcs sti_drm_plane_helpers_funcs = {
187 .prepare_fb = sti_drm_plane_prepare_fb,
188 .cleanup_fb = sti_drm_plane_cleanup_fb,
189 .atomic_check = sti_drm_plane_atomic_check,
190 .atomic_update = sti_drm_plane_atomic_update,
191 .atomic_disable = sti_drm_plane_atomic_disable,
192};
193
194static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane,
195 uint64_t default_val)
196{
197 struct drm_device *dev = plane->dev;
198 struct sti_drm_private *private = dev->dev_private;
199 struct drm_property *prop;
200 struct sti_layer *layer = to_sti_layer(plane);
201
202 prop = private->plane_zorder_property;
203 if (!prop) {
204 prop = drm_property_create_range(dev, 0, "zpos", 0,
205 GAM_MIXER_NB_DEPTH_LEVEL - 1);
206 if (!prop)
207 return;
208
209 private->plane_zorder_property = prop;
210 }
211
212 drm_object_attach_property(&plane->base, prop, default_val);
213 layer->zorder = default_val;
214}
215
216struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
217 struct sti_layer *layer,
218 unsigned int possible_crtcs,
219 enum drm_plane_type type)
220{
221 int err, i;
222 uint64_t default_zorder = 0;
223
224 err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs,
225 &sti_drm_plane_funcs,
226 sti_layer_get_formats(layer),
227 sti_layer_get_nb_formats(layer), type);
228 if (err) {
229 DRM_ERROR("Failed to initialize plane\n");
230 return NULL;
231 }
232
233 drm_plane_helper_add(&layer->plane, &sti_drm_plane_helpers_funcs);
234
235 for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++)
236 if (sti_layer_default_zorder[i] == layer->desc)
237 break;
238
239 default_zorder = i + 1;
240
241 if (type == DRM_PLANE_TYPE_OVERLAY)
242 sti_drm_plane_attach_zorder_property(&layer->plane,
243 default_zorder);
244
245 DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n",
246 layer->plane.base.id,
247 sti_layer_to_str(layer), default_zorder);
248
249 return &layer->plane;
250}
251EXPORT_SYMBOL(sti_drm_plane_init);
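
The deleted file derived each overlay's default zpos from a fixed back-to-front table (background < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3): the plane's position in the table, plus one, became the property default. A small sketch of that lookup, with hypothetical enum names mirroring sti_layer_default_zorder[]:

enum layer_desc_sketch { GDP_0, VID_0, GDP_1, VID_1, GDP_2, GDP_3 };

static const enum layer_desc_sketch default_zorder[] = {
	GDP_0, VID_0, GDP_1, VID_1, GDP_2, GDP_3,
};

static unsigned int default_zpos(enum layer_desc_sketch desc)
{
	unsigned int i;

	for (i = 0; i < sizeof(default_zorder) / sizeof(default_zorder[0]); i++)
		if (default_zorder[i] == desc)
			break;
	return i + 1; /* the background sits below zpos 1 */
}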
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.h b/drivers/gpu/drm/sti/sti_drm_plane.h
deleted file mode 100644
index 4f191839f2a7..000000000000
--- a/drivers/gpu/drm/sti/sti_drm_plane.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_DRM_PLANE_H_
8#define _STI_DRM_PLANE_H_
9
10#include <drm/drmP.h>
11
12struct sti_layer;
13
14struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
15 struct sti_layer *layer,
16 unsigned int possible_crtcs,
17 enum drm_plane_type type);
18#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 59d558b400b3..6f4af6a8ba1b 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -18,8 +18,8 @@
18#include <drm/drm_gem_cma_helper.h> 18#include <drm/drm_gem_cma_helper.h>
19#include <drm/drm_fb_cma_helper.h> 19#include <drm/drm_fb_cma_helper.h>
20 20
21#include "sti_drm_drv.h" 21#include "sti_crtc.h"
22#include "sti_drm_crtc.h" 22#include "sti_drv.h"
23 23
24#define DRIVER_NAME "sti" 24#define DRIVER_NAME "sti"
25#define DRIVER_DESC "STMicroelectronics SoC DRM" 25#define DRIVER_DESC "STMicroelectronics SoC DRM"
@@ -30,15 +30,15 @@
30#define STI_MAX_FB_HEIGHT 4096 30#define STI_MAX_FB_HEIGHT 4096
31#define STI_MAX_FB_WIDTH 4096 31#define STI_MAX_FB_WIDTH 4096
32 32
33static void sti_drm_atomic_schedule(struct sti_drm_private *private, 33static void sti_atomic_schedule(struct sti_private *private,
34 struct drm_atomic_state *state) 34 struct drm_atomic_state *state)
35{ 35{
36 private->commit.state = state; 36 private->commit.state = state;
37 schedule_work(&private->commit.work); 37 schedule_work(&private->commit.work);
38} 38}
39 39
40static void sti_drm_atomic_complete(struct sti_drm_private *private, 40static void sti_atomic_complete(struct sti_private *private,
41 struct drm_atomic_state *state) 41 struct drm_atomic_state *state)
42{ 42{
43 struct drm_device *drm = private->drm_dev; 43 struct drm_device *drm = private->drm_dev;
44 44
@@ -68,18 +68,18 @@ static void sti_drm_atomic_complete(struct sti_drm_private *private,
68 drm_atomic_state_free(state); 68 drm_atomic_state_free(state);
69} 69}
70 70
71static void sti_drm_atomic_work(struct work_struct *work) 71static void sti_atomic_work(struct work_struct *work)
72{ 72{
73 struct sti_drm_private *private = container_of(work, 73 struct sti_private *private = container_of(work,
74 struct sti_drm_private, commit.work); 74 struct sti_private, commit.work);
75 75
76 sti_drm_atomic_complete(private, private->commit.state); 76 sti_atomic_complete(private, private->commit.state);
77} 77}
78 78
79static int sti_drm_atomic_commit(struct drm_device *drm, 79static int sti_atomic_commit(struct drm_device *drm,
80 struct drm_atomic_state *state, bool async) 80 struct drm_atomic_state *state, bool async)
81{ 81{
82 struct sti_drm_private *private = drm->dev_private; 82 struct sti_private *private = drm->dev_private;
83 int err; 83 int err;
84 84
85 err = drm_atomic_helper_prepare_planes(drm, state); 85 err = drm_atomic_helper_prepare_planes(drm, state);
@@ -99,21 +99,21 @@ static int sti_drm_atomic_commit(struct drm_device *drm,
99 drm_atomic_helper_swap_state(drm, state); 99 drm_atomic_helper_swap_state(drm, state);
100 100
101 if (async) 101 if (async)
102 sti_drm_atomic_schedule(private, state); 102 sti_atomic_schedule(private, state);
103 else 103 else
104 sti_drm_atomic_complete(private, state); 104 sti_atomic_complete(private, state);
105 105
106 mutex_unlock(&private->commit.lock); 106 mutex_unlock(&private->commit.lock);
107 return 0; 107 return 0;
108} 108}
109 109
110static struct drm_mode_config_funcs sti_drm_mode_config_funcs = { 110static struct drm_mode_config_funcs sti_mode_config_funcs = {
111 .fb_create = drm_fb_cma_create, 111 .fb_create = drm_fb_cma_create,
112 .atomic_check = drm_atomic_helper_check, 112 .atomic_check = drm_atomic_helper_check,
113 .atomic_commit = sti_drm_atomic_commit, 113 .atomic_commit = sti_atomic_commit,
114}; 114};
115 115
116static void sti_drm_mode_config_init(struct drm_device *dev) 116static void sti_mode_config_init(struct drm_device *dev)
117{ 117{
118 dev->mode_config.min_width = 0; 118 dev->mode_config.min_width = 0;
119 dev->mode_config.min_height = 0; 119 dev->mode_config.min_height = 0;
@@ -126,15 +126,15 @@ static void sti_drm_mode_config_init(struct drm_device *dev)
126 dev->mode_config.max_width = STI_MAX_FB_HEIGHT; 126 dev->mode_config.max_width = STI_MAX_FB_HEIGHT;
127 dev->mode_config.max_height = STI_MAX_FB_WIDTH; 127 dev->mode_config.max_height = STI_MAX_FB_WIDTH;
128 128
129 dev->mode_config.funcs = &sti_drm_mode_config_funcs; 129 dev->mode_config.funcs = &sti_mode_config_funcs;
130} 130}
131 131
132static int sti_drm_load(struct drm_device *dev, unsigned long flags) 132static int sti_load(struct drm_device *dev, unsigned long flags)
133{ 133{
134 struct sti_drm_private *private; 134 struct sti_private *private;
135 int ret; 135 int ret;
136 136
137 private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL); 137 private = kzalloc(sizeof(*private), GFP_KERNEL);
138 if (!private) { 138 if (!private) {
139 DRM_ERROR("Failed to allocate private\n"); 139 DRM_ERROR("Failed to allocate private\n");
140 return -ENOMEM; 140 return -ENOMEM;
@@ -143,12 +143,12 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
143 private->drm_dev = dev; 143 private->drm_dev = dev;
144 144
145 mutex_init(&private->commit.lock); 145 mutex_init(&private->commit.lock);
146 INIT_WORK(&private->commit.work, sti_drm_atomic_work); 146 INIT_WORK(&private->commit.work, sti_atomic_work);
147 147
148 drm_mode_config_init(dev); 148 drm_mode_config_init(dev);
149 drm_kms_helper_poll_init(dev); 149 drm_kms_helper_poll_init(dev);
150 150
151 sti_drm_mode_config_init(dev); 151 sti_mode_config_init(dev);
152 152
153 ret = component_bind_all(dev->dev, dev); 153 ret = component_bind_all(dev->dev, dev);
154 if (ret) { 154 if (ret) {
@@ -162,13 +162,13 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
162 162
163#ifdef CONFIG_DRM_STI_FBDEV 163#ifdef CONFIG_DRM_STI_FBDEV
164 drm_fbdev_cma_init(dev, 32, 164 drm_fbdev_cma_init(dev, 32,
165 dev->mode_config.num_crtc, 165 dev->mode_config.num_crtc,
166 dev->mode_config.num_connector); 166 dev->mode_config.num_connector);
167#endif 167#endif
168 return 0; 168 return 0;
169} 169}
170 170
171static const struct file_operations sti_drm_driver_fops = { 171static const struct file_operations sti_driver_fops = {
172 .owner = THIS_MODULE, 172 .owner = THIS_MODULE,
173 .open = drm_open, 173 .open = drm_open,
174 .mmap = drm_gem_cma_mmap, 174 .mmap = drm_gem_cma_mmap,
@@ -181,33 +181,33 @@ static const struct file_operations sti_drm_driver_fops = {
181 .release = drm_release, 181 .release = drm_release,
182}; 182};
183 183
184static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev, 184static struct dma_buf *sti_gem_prime_export(struct drm_device *dev,
185 struct drm_gem_object *obj, 185 struct drm_gem_object *obj,
186 int flags) 186 int flags)
187{ 187{
 188 /* we want to be able to write in the mmapped buffer */ 188 /* we want to be able to write in the mmapped buffer */
189 flags |= O_RDWR; 189 flags |= O_RDWR;
190 return drm_gem_prime_export(dev, obj, flags); 190 return drm_gem_prime_export(dev, obj, flags);
191} 191}
192 192
193static struct drm_driver sti_drm_driver = { 193static struct drm_driver sti_driver = {
194 .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | 194 .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
195 DRIVER_GEM | DRIVER_PRIME, 195 DRIVER_GEM | DRIVER_PRIME,
196 .load = sti_drm_load, 196 .load = sti_load,
197 .gem_free_object = drm_gem_cma_free_object, 197 .gem_free_object = drm_gem_cma_free_object,
198 .gem_vm_ops = &drm_gem_cma_vm_ops, 198 .gem_vm_ops = &drm_gem_cma_vm_ops,
199 .dumb_create = drm_gem_cma_dumb_create, 199 .dumb_create = drm_gem_cma_dumb_create,
200 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 200 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
201 .dumb_destroy = drm_gem_dumb_destroy, 201 .dumb_destroy = drm_gem_dumb_destroy,
202 .fops = &sti_drm_driver_fops, 202 .fops = &sti_driver_fops,
203 203
204 .get_vblank_counter = drm_vblank_count, 204 .get_vblank_counter = drm_vblank_count,
205 .enable_vblank = sti_drm_crtc_enable_vblank, 205 .enable_vblank = sti_crtc_enable_vblank,
206 .disable_vblank = sti_drm_crtc_disable_vblank, 206 .disable_vblank = sti_crtc_disable_vblank,
207 207
208 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 208 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
209 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 209 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
210 .gem_prime_export = sti_drm_gem_prime_export, 210 .gem_prime_export = sti_gem_prime_export,
211 .gem_prime_import = drm_gem_prime_import, 211 .gem_prime_import = drm_gem_prime_import,
212 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, 212 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
213 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, 213 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
@@ -227,30 +227,32 @@ static int compare_of(struct device *dev, void *data)
227 return dev->of_node == data; 227 return dev->of_node == data;
228} 228}
229 229
230static int sti_drm_bind(struct device *dev) 230static int sti_bind(struct device *dev)
231{ 231{
232 return drm_platform_init(&sti_drm_driver, to_platform_device(dev)); 232 return drm_platform_init(&sti_driver, to_platform_device(dev));
233} 233}
234 234
235static void sti_drm_unbind(struct device *dev) 235static void sti_unbind(struct device *dev)
236{ 236{
237 drm_put_dev(dev_get_drvdata(dev)); 237 drm_put_dev(dev_get_drvdata(dev));
238} 238}
239 239
240static const struct component_master_ops sti_drm_ops = { 240static const struct component_master_ops sti_ops = {
241 .bind = sti_drm_bind, 241 .bind = sti_bind,
242 .unbind = sti_drm_unbind, 242 .unbind = sti_unbind,
243}; 243};
244 244
245static int sti_drm_master_probe(struct platform_device *pdev) 245static int sti_platform_probe(struct platform_device *pdev)
246{ 246{
247 struct device *dev = &pdev->dev; 247 struct device *dev = &pdev->dev;
248 struct device_node *node = dev->parent->of_node; 248 struct device_node *node = dev->of_node;
249 struct device_node *child_np; 249 struct device_node *child_np;
250 struct component_match *match = NULL; 250 struct component_match *match = NULL;
251 251
252 dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); 252 dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
253 253
254 of_platform_populate(node, NULL, NULL, dev);
255
254 child_np = of_get_next_available_child(node, NULL); 256 child_np = of_get_next_available_child(node, NULL);
255 257
256 while (child_np) { 258 while (child_np) {
@@ -259,68 +261,33 @@ static int sti_drm_master_probe(struct platform_device *pdev)
259 child_np = of_get_next_available_child(node, child_np); 261 child_np = of_get_next_available_child(node, child_np);
260 } 262 }
261 263
262 return component_master_add_with_match(dev, &sti_drm_ops, match); 264 return component_master_add_with_match(dev, &sti_ops, match);
263}
264
265static int sti_drm_master_remove(struct platform_device *pdev)
266{
267 component_master_del(&pdev->dev, &sti_drm_ops);
268 return 0;
269} 265}
270 266
271static struct platform_driver sti_drm_master_driver = { 267static int sti_platform_remove(struct platform_device *pdev)
272 .probe = sti_drm_master_probe,
273 .remove = sti_drm_master_remove,
274 .driver = {
275 .name = DRIVER_NAME "__master",
276 },
277};
278
279static int sti_drm_platform_probe(struct platform_device *pdev)
280{ 268{
281 struct device *dev = &pdev->dev; 269 component_master_del(&pdev->dev, &sti_ops);
282 struct device_node *node = dev->of_node;
283 struct platform_device *master;
284
285 of_platform_populate(node, NULL, NULL, dev);
286
287 platform_driver_register(&sti_drm_master_driver);
288 master = platform_device_register_resndata(dev,
289 DRIVER_NAME "__master", -1,
290 NULL, 0, NULL, 0);
291 if (IS_ERR(master))
292 return PTR_ERR(master);
293
294 platform_set_drvdata(pdev, master);
295 return 0;
296}
297
298static int sti_drm_platform_remove(struct platform_device *pdev)
299{
300 struct platform_device *master = platform_get_drvdata(pdev);
301
302 of_platform_depopulate(&pdev->dev); 270 of_platform_depopulate(&pdev->dev);
303 platform_device_unregister(master); 271
304 platform_driver_unregister(&sti_drm_master_driver);
305 return 0; 272 return 0;
306} 273}
307 274
308static const struct of_device_id sti_drm_dt_ids[] = { 275static const struct of_device_id sti_dt_ids[] = {
309 { .compatible = "st,sti-display-subsystem", }, 276 { .compatible = "st,sti-display-subsystem", },
310 { /* end node */ }, 277 { /* end node */ },
311}; 278};
312MODULE_DEVICE_TABLE(of, sti_drm_dt_ids); 279MODULE_DEVICE_TABLE(of, sti_dt_ids);
313 280
314static struct platform_driver sti_drm_platform_driver = { 281static struct platform_driver sti_platform_driver = {
315 .probe = sti_drm_platform_probe, 282 .probe = sti_platform_probe,
316 .remove = sti_drm_platform_remove, 283 .remove = sti_platform_remove,
317 .driver = { 284 .driver = {
318 .name = DRIVER_NAME, 285 .name = DRIVER_NAME,
319 .of_match_table = sti_drm_dt_ids, 286 .of_match_table = sti_dt_ids,
320 }, 287 },
321}; 288};
322 289
323module_platform_driver(sti_drm_platform_driver); 290module_platform_driver(sti_platform_driver);
324 291
325MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); 292MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
326MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver"); 293MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
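The rewritten probe above folds the old two-stage setup (a populated platform device plus a separate "__master" driver) into a single component-master registration. A minimal sketch of that pattern, under stated assumptions: my_probe and my_ops are illustrative names, compare_of is the helper shown at the top of this hunk, the elided loop body is presumed to call component_match_add(), and error handling is trimmed.

	#include <linux/component.h>
	#include <linux/of_platform.h>
	#include <linux/platform_device.h>

	static int my_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct device_node *child = NULL;
		struct component_match *match = NULL;

		/* Create platform devices for each child of the subsystem node */
		of_platform_populate(dev->of_node, NULL, NULL, dev);

		/* Queue every available child as a component to wait for */
		while ((child = of_get_next_available_child(dev->of_node, child)))
			component_match_add(dev, &match, compare_of, child);

		/* my_ops.bind runs only once all matched components have probed */
		return component_master_add_with_match(dev, &my_ops, match);
	}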
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drv.h
index c413aa3ff402..9372f69e1859 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.h
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -4,8 +4,8 @@
4 * License terms: GNU General Public License (GPL), version 2 4 * License terms: GNU General Public License (GPL), version 2
5 */ 5 */
6 6
7#ifndef _STI_DRM_DRV_H_ 7#ifndef _STI_DRV_H_
8#define _STI_DRM_DRV_H_ 8#define _STI_DRV_H_
9 9
10#include <drm/drmP.h> 10#include <drm/drmP.h>
11 11
@@ -20,7 +20,7 @@ struct sti_tvout;
20 * @plane_zorder_property: z-order property for CRTC planes 20 * @plane_zorder_property: z-order property for CRTC planes
21 * @drm_dev: drm device 21 * @drm_dev: drm device
22 */ 22 */
23struct sti_drm_private { 23struct sti_private {
24 struct sti_compositor *compo; 24 struct sti_compositor *compo;
25 struct drm_property *plane_zorder_property; 25 struct drm_property *plane_zorder_property;
26 struct drm_device *drm_dev; 26 struct drm_device *drm_dev;
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 087906fd8846..9365670427ad 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -9,9 +9,12 @@
9#include <linux/clk.h> 9#include <linux/clk.h>
10#include <linux/dma-mapping.h> 10#include <linux/dma-mapping.h>
11 11
12#include <drm/drm_fb_cma_helper.h>
13#include <drm/drm_gem_cma_helper.h>
14
12#include "sti_compositor.h" 15#include "sti_compositor.h"
13#include "sti_gdp.h" 16#include "sti_gdp.h"
14#include "sti_layer.h" 17#include "sti_plane.h"
15#include "sti_vtg.h" 18#include "sti_vtg.h"
16 19
17#define ALPHASWITCH BIT(6) 20#define ALPHASWITCH BIT(6)
@@ -26,7 +29,7 @@
26#define GDP_XBGR8888 (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH) 29#define GDP_XBGR8888 (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
27#define GDP_ARGB8565 0x04 30#define GDP_ARGB8565 0x04
28#define GDP_ARGB8888 0x05 31#define GDP_ARGB8888 0x05
29#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH) 32#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
30#define GDP_ARGB1555 0x06 33#define GDP_ARGB1555 0x06
31#define GDP_ARGB4444 0x07 34#define GDP_ARGB4444 0x07
32#define GDP_CLUT8 0x0B 35#define GDP_CLUT8 0x0B
@@ -53,8 +56,8 @@
53#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0)) 56#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0))
54#define GAM_GDP_SIZE_MAX 0x7FF 57#define GAM_GDP_SIZE_MAX 0x7FF
55 58
56#define GDP_NODE_NB_BANK 2 59#define GDP_NODE_NB_BANK 2
57#define GDP_NODE_PER_FIELD 2 60#define GDP_NODE_PER_FIELD 2
58 61
59struct sti_gdp_node { 62struct sti_gdp_node {
60 u32 gam_gdp_ctl; 63 u32 gam_gdp_ctl;
@@ -85,16 +88,20 @@ struct sti_gdp_node_list {
85/** 88/**
86 * STI GDP structure 89 * STI GDP structure
87 * 90 *
88 * @layer: layer structure 91 * @sti_plane: sti_plane structure
92 * @dev: driver device
93 * @regs: gdp registers
89 * @clk_pix: pixel clock for the current gdp 94 * @clk_pix: pixel clock for the current gdp
90 * @clk_main_parent: gdp parent clock if main path used 95 * @clk_main_parent: gdp parent clock if main path used
91 * @clk_aux_parent: gdp parent clock if aux path used 96 * @clk_aux_parent: gdp parent clock if aux path used
92 * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification 97 * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
93 * @is_curr_top: true if the current node processed is the top field 98 * @is_curr_top: true if the current node processed is the top field
 94 * @node_list: array of node lists 99 * @node_list: array of node lists
95 */ 100 */
96struct sti_gdp { 101struct sti_gdp {
97 struct sti_layer layer; 102 struct sti_plane plane;
103 struct device *dev;
104 void __iomem *regs;
98 struct clk *clk_pix; 105 struct clk *clk_pix;
99 struct clk *clk_main_parent; 106 struct clk *clk_main_parent;
100 struct clk *clk_aux_parent; 107 struct clk *clk_aux_parent;
@@ -103,7 +110,7 @@ struct sti_gdp {
103 struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK]; 110 struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
104}; 111};
105 112
106#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer) 113#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
107 114
108static const uint32_t gdp_supported_formats[] = { 115static const uint32_t gdp_supported_formats[] = {
109 DRM_FORMAT_XRGB8888, 116 DRM_FORMAT_XRGB8888,
@@ -120,16 +127,6 @@ static const uint32_t gdp_supported_formats[] = {
120 DRM_FORMAT_C8, 127 DRM_FORMAT_C8,
121}; 128};
122 129
123static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
124{
125 return gdp_supported_formats;
126}
127
128static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
129{
130 return ARRAY_SIZE(gdp_supported_formats);
131}
132
133static int sti_gdp_fourcc2format(int fourcc) 130static int sti_gdp_fourcc2format(int fourcc)
134{ 131{
135 switch (fourcc) { 132 switch (fourcc) {
@@ -175,20 +172,19 @@ static int sti_gdp_get_alpharange(int format)
175 172
176/** 173/**
177 * sti_gdp_get_free_nodes 174 * sti_gdp_get_free_nodes
178 * @layer: gdp layer 175 * @gdp: gdp pointer
179 * 176 *
180 * Look for a GDP node list that is not currently read by the HW. 177 * Look for a GDP node list that is not currently read by the HW.
181 * 178 *
182 * RETURNS: 179 * RETURNS:
183 * Pointer to the free GDP node list 180 * Pointer to the free GDP node list
184 */ 181 */
185static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer) 182static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
186{ 183{
187 int hw_nvn; 184 int hw_nvn;
188 struct sti_gdp *gdp = to_sti_gdp(layer);
189 unsigned int i; 185 unsigned int i;
190 186
191 hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); 187 hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
192 if (!hw_nvn) 188 if (!hw_nvn)
193 goto end; 189 goto end;
194 190
@@ -199,7 +195,7 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
199 195
200 /* in hazardous cases restart with the first node */ 196 /* in hazardous cases restart with the first node */
201 DRM_ERROR("inconsistent NVN for %s: 0x%08X\n", 197 DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
202 sti_layer_to_str(layer), hw_nvn); 198 sti_plane_to_str(&gdp->plane), hw_nvn);
203 199
204end: 200end:
205 return &gdp->node_list[0]; 201 return &gdp->node_list[0];
@@ -207,7 +203,7 @@ end:
207 203
208/** 204/**
209 * sti_gdp_get_current_nodes 205 * sti_gdp_get_current_nodes
210 * @layer: GDP layer 206 * @gdp: gdp pointer
211 * 207 *
212 * Look for GDP nodes that are currently read by the HW. 208 * Look for GDP nodes that are currently read by the HW.
213 * 209 *
@@ -215,13 +211,12 @@ end:
215 * Pointer to the current GDP node list 211 * Pointer to the current GDP node list
216 */ 212 */
217static 213static
218struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer) 214struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp)
219{ 215{
220 int hw_nvn; 216 int hw_nvn;
221 struct sti_gdp *gdp = to_sti_gdp(layer);
222 unsigned int i; 217 unsigned int i;
223 218
224 hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); 219 hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
225 if (!hw_nvn) 220 if (!hw_nvn)
226 goto end; 221 goto end;
227 222
@@ -232,205 +227,25 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
232 227
233end: 228end:
234 DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n", 229 DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
235 hw_nvn, sti_layer_to_str(layer)); 230 hw_nvn, sti_plane_to_str(&gdp->plane));
236 231
237 return NULL; 232 return NULL;
238} 233}
239 234
240/** 235/**
241 * sti_gdp_prepare_layer 236 * sti_gdp_disable
242 * @lay: gdp layer 237 * @gdp: gdp pointer
243 * @first_prepare: true if it is the first time this function is called
244 *
245 * Update the free GDP node list according to the layer properties.
246 *
247 * RETURNS:
248 * 0 on success.
249 */
250static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
251{
252 struct sti_gdp_node_list *list;
253 struct sti_gdp_node *top_field, *btm_field;
254 struct drm_display_mode *mode = layer->mode;
255 struct device *dev = layer->dev;
256 struct sti_gdp *gdp = to_sti_gdp(layer);
257 struct sti_compositor *compo = dev_get_drvdata(dev);
258 int format;
259 unsigned int depth, bpp;
260 int rate = mode->clock * 1000;
261 int res;
262 u32 ydo, xdo, yds, xds;
263
264 list = sti_gdp_get_free_nodes(layer);
265 top_field = list->top_field;
266 btm_field = list->btm_field;
267
268 dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
269 sti_layer_to_str(layer), top_field, btm_field);
270
271 /* Build the top field from layer params */
272 top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
273 top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
274 format = sti_gdp_fourcc2format(layer->format);
275 if (format == -1) {
276 DRM_ERROR("Format not supported by GDP %.4s\n",
277 (char *)&layer->format);
278 return 1;
279 }
280 top_field->gam_gdp_ctl |= format;
281 top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
282 top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
283
284 /* pixel memory location */
285 drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
286 top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
287 top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
288 top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];
289
290 /* input parameters */
291 top_field->gam_gdp_pmp = layer->pitches[0];
292 top_field->gam_gdp_size =
293 clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
294 clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);
295
296 /* output parameters */
297 ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
298 yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
299 xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
300 xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
301 top_field->gam_gdp_vpo = (ydo << 16) | xdo;
302 top_field->gam_gdp_vps = (yds << 16) | xds;
303
304 /* Same content and chained together */
305 memcpy(btm_field, top_field, sizeof(*btm_field));
306 top_field->gam_gdp_nvn = list->btm_field_paddr;
307 btm_field->gam_gdp_nvn = list->top_field_paddr;
308
309 /* Interlaced mode */
310 if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
311 btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
312 layer->pitches[0];
313
314 if (first_prepare) {
315 /* Register gdp callback */
316 if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
317 compo->vtg_main : compo->vtg_aux,
318 &gdp->vtg_field_nb, layer->mixer_id)) {
319 DRM_ERROR("Cannot register VTG notifier\n");
320 return 1;
321 }
322
323 /* Set and enable gdp clock */
324 if (gdp->clk_pix) {
325 struct clk *clkp;
326 /* Depending on the mixer used, the gdp pixel clock
327 * needs a different parent clock. */
328 if (layer->mixer_id == STI_MIXER_MAIN)
329 clkp = gdp->clk_main_parent;
330 else
331 clkp = gdp->clk_aux_parent;
332
333 if (clkp)
334 clk_set_parent(gdp->clk_pix, clkp);
335
336 res = clk_set_rate(gdp->clk_pix, rate);
337 if (res < 0) {
338 DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
339 rate);
340 return 1;
341 }
342
343 if (clk_prepare_enable(gdp->clk_pix)) {
344 DRM_ERROR("Failed to prepare/enable gdp\n");
345 return 1;
346 }
347 }
348 }
349
350 return 0;
351}
352
353/**
354 * sti_gdp_commit_layer
355 * @lay: gdp layer
356 *
357 * Update the NVN field of the 'right' field of the current GDP node (being
358 * used by the HW) with the address of the updated ('free') top field GDP node.
359 * - In interlaced mode the 'right' field is the bottom field as we update
360 * frames starting from their top field
361 * - In progressive mode, we update both bottom and top fields which are
362 * equal nodes.
363 * At the next VSYNC, the updated node list will be used by the HW.
364 *
365 * RETURNS:
366 * 0 on success.
367 */
368static int sti_gdp_commit_layer(struct sti_layer *layer)
369{
370 struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
371 struct sti_gdp_node *updated_top_node = updated_list->top_field;
372 struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
373 struct sti_gdp *gdp = to_sti_gdp(layer);
374 u32 dma_updated_top = updated_list->top_field_paddr;
375 u32 dma_updated_btm = updated_list->btm_field_paddr;
376 struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
377
378 dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
379 sti_layer_to_str(layer),
380 updated_top_node, updated_btm_node);
381 dev_dbg(layer->dev, "Current NVN:0x%X\n",
382 readl(layer->regs + GAM_GDP_NVN_OFFSET));
383 dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
384 (unsigned long)layer->paddr,
385 readl(layer->regs + GAM_GDP_PML_OFFSET));
386
387 if (curr_list == NULL) {
388 /* On a first update or an invalid node, write directly to the
389 * hw register */
390 DRM_DEBUG_DRIVER("%s first update (or invalid node)",
391 sti_layer_to_str(layer));
392
393 writel(gdp->is_curr_top == true ?
394 dma_updated_btm : dma_updated_top,
395 layer->regs + GAM_GDP_NVN_OFFSET);
396 return 0;
397 }
398
399 if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
400 if (gdp->is_curr_top == true) {
401 /* Do not update in the middle of the frame, but
402 * postpone the update until after the bottom field
403 * has been displayed */
404 curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
405 } else {
406 /* Direct update to avoid one frame delay */
407 writel(dma_updated_top,
408 layer->regs + GAM_GDP_NVN_OFFSET);
409 }
410 } else {
411 /* Direct update for progressive to avoid one frame delay */
412 writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
413 }
414
415 return 0;
416}
417
418/**
419 * sti_gdp_disable_layer
420 * @lay: gdp layer
421 * 238 *
422 * Disable a GDP. 239 * Disable a GDP.
423 *
424 * RETURNS:
425 * 0 on success.
426 */ 240 */
427static int sti_gdp_disable_layer(struct sti_layer *layer) 241static void sti_gdp_disable(struct sti_gdp *gdp)
428{ 242{
243 struct drm_plane *drm_plane = &gdp->plane.drm_plane;
244 struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
245 struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
429 unsigned int i; 246 unsigned int i;
430 struct sti_gdp *gdp = to_sti_gdp(layer);
431 struct sti_compositor *compo = dev_get_drvdata(layer->dev);
432 247
433 DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); 248 DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));
434 249
435 /* Set the nodes as 'to be ignored on mixer' */ 250 /* Set the nodes as 'to be ignored on mixer' */
436 for (i = 0; i < GDP_NODE_NB_BANK; i++) { 251 for (i = 0; i < GDP_NODE_NB_BANK; i++) {
@@ -438,14 +253,14 @@ static int sti_gdp_disable_layer(struct sti_layer *layer)
438 gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE; 253 gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
439 } 254 }
440 255
441 if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ? 256 if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ?
442 compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb)) 257 compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
443 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); 258 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
444 259
445 if (gdp->clk_pix) 260 if (gdp->clk_pix)
446 clk_disable_unprepare(gdp->clk_pix); 261 clk_disable_unprepare(gdp->clk_pix);
447 262
448 return 0; 263 gdp->plane.status = STI_PLANE_DISABLED;
449} 264}
450 265
451/** 266/**
@@ -464,6 +279,14 @@ int sti_gdp_field_cb(struct notifier_block *nb,
464{ 279{
465 struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb); 280 struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
466 281
282 if (gdp->plane.status == STI_PLANE_FLUSHING) {
283 /* disable needs to be synchronized on the vsync event */
284 DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
285 sti_plane_to_str(&gdp->plane));
286
287 sti_gdp_disable(gdp);
288 }
289
467 switch (event) { 290 switch (event) {
468 case VTG_TOP_FIELD_EVENT: 291 case VTG_TOP_FIELD_EVENT:
469 gdp->is_curr_top = true; 292 gdp->is_curr_top = true;
@@ -479,10 +302,9 @@ int sti_gdp_field_cb(struct notifier_block *nb,
479 return 0; 302 return 0;
480} 303}
481 304
482static void sti_gdp_init(struct sti_layer *layer) 305static void sti_gdp_init(struct sti_gdp *gdp)
483{ 306{
484 struct sti_gdp *gdp = to_sti_gdp(layer); 307 struct device_node *np = gdp->dev->of_node;
485 struct device_node *np = layer->dev->of_node;
486 dma_addr_t dma_addr; 308 dma_addr_t dma_addr;
487 void *base; 309 void *base;
488 unsigned int i, size; 310 unsigned int i, size;
@@ -490,8 +312,8 @@ static void sti_gdp_init(struct sti_layer *layer)
490 /* Allocate all the nodes within a single memory page */ 312 /* Allocate all the nodes within a single memory page */
491 size = sizeof(struct sti_gdp_node) * 313 size = sizeof(struct sti_gdp_node) *
492 GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK; 314 GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
493 base = dma_alloc_writecombine(layer->dev, 315 base = dma_alloc_writecombine(gdp->dev,
494 size, &dma_addr, GFP_KERNEL | GFP_DMA); 316 size, &dma_addr, GFP_KERNEL | GFP_DMA);
495 317
496 if (!base) { 318 if (!base) {
497 DRM_ERROR("Failed to allocate memory for GDP node\n"); 319 DRM_ERROR("Failed to allocate memory for GDP node\n");
@@ -526,7 +348,7 @@ static void sti_gdp_init(struct sti_layer *layer)
526 /* GDP of STiH407 chip has its own pixel clock */ 348 /* GDP of STiH407 chip has its own pixel clock */
527 char *clk_name; 349 char *clk_name;
528 350
529 switch (layer->desc) { 351 switch (gdp->plane.desc) {
530 case STI_GDP_0: 352 case STI_GDP_0:
531 clk_name = "pix_gdp1"; 353 clk_name = "pix_gdp1";
532 break; 354 break;
@@ -544,32 +366,249 @@ static void sti_gdp_init(struct sti_layer *layer)
544 return; 366 return;
545 } 367 }
546 368
547 gdp->clk_pix = devm_clk_get(layer->dev, clk_name); 369 gdp->clk_pix = devm_clk_get(gdp->dev, clk_name);
548 if (IS_ERR(gdp->clk_pix)) 370 if (IS_ERR(gdp->clk_pix))
549 DRM_ERROR("Cannot get %s clock\n", clk_name); 371 DRM_ERROR("Cannot get %s clock\n", clk_name);
550 372
551 gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent"); 373 gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent");
552 if (IS_ERR(gdp->clk_main_parent)) 374 if (IS_ERR(gdp->clk_main_parent))
553 DRM_ERROR("Cannot get main_parent clock\n"); 375 DRM_ERROR("Cannot get main_parent clock\n");
554 376
555 gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent"); 377 gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent");
556 if (IS_ERR(gdp->clk_aux_parent)) 378 if (IS_ERR(gdp->clk_aux_parent))
557 DRM_ERROR("Cannot get aux_parent clock\n"); 379 DRM_ERROR("Cannot get aux_parent clock\n");
558 } 380 }
559} 381}
560 382
561static const struct sti_layer_funcs gdp_ops = { 383static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
562 .get_formats = sti_gdp_get_formats, 384 struct drm_plane_state *oldstate)
563 .get_nb_formats = sti_gdp_get_nb_formats, 385{
564 .init = sti_gdp_init, 386 struct drm_plane_state *state = drm_plane->state;
565 .prepare = sti_gdp_prepare_layer, 387 struct sti_plane *plane = to_sti_plane(drm_plane);
566 .commit = sti_gdp_commit_layer, 388 struct sti_gdp *gdp = to_sti_gdp(plane);
567 .disable = sti_gdp_disable_layer, 389 struct drm_crtc *crtc = state->crtc;
390 struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
391 struct drm_framebuffer *fb = state->fb;
392 bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
393 struct sti_mixer *mixer;
394 struct drm_display_mode *mode;
395 int dst_x, dst_y, dst_w, dst_h;
396 int src_x, src_y, src_w, src_h;
397 struct drm_gem_cma_object *cma_obj;
398 struct sti_gdp_node_list *list;
399 struct sti_gdp_node_list *curr_list;
400 struct sti_gdp_node *top_field, *btm_field;
401 u32 dma_updated_top;
402 u32 dma_updated_btm;
403 int format;
404 unsigned int depth, bpp;
405 u32 ydo, xdo, yds, xds;
406 int res;
407
408 /* Manage the case where crtc is null (disabled) */
409 if (!crtc)
410 return;
411
412 mixer = to_sti_mixer(crtc);
413 mode = &crtc->mode;
414 dst_x = state->crtc_x;
415 dst_y = state->crtc_y;
416 dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
417 dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
418 /* src_x are in 16.16 format */
419 src_x = state->src_x >> 16;
420 src_y = state->src_y >> 16;
421 src_w = state->src_w >> 16;
422 src_h = state->src_h >> 16;
423
424 DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
425 crtc->base.id, sti_mixer_to_str(mixer),
426 drm_plane->base.id, sti_plane_to_str(plane));
427 DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
428 sti_plane_to_str(plane),
429 dst_w, dst_h, dst_x, dst_y,
430 src_w, src_h, src_x, src_y);
431
432 list = sti_gdp_get_free_nodes(gdp);
433 top_field = list->top_field;
434 btm_field = list->btm_field;
435
436 dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
437 sti_plane_to_str(plane), top_field, btm_field);
438
439 /* build the top field */
440 top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
441 top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
442 format = sti_gdp_fourcc2format(fb->pixel_format);
443 if (format == -1) {
444 DRM_ERROR("Format not supported by GDP %.4s\n",
445 (char *)&fb->pixel_format);
446 return;
447 }
448 top_field->gam_gdp_ctl |= format;
449 top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
450 top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
451
452 cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
453 if (!cma_obj) {
454 DRM_ERROR("Can't get CMA GEM object for fb\n");
455 return;
456 }
457
458 DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
459 (char *)&fb->pixel_format,
460 (unsigned long)cma_obj->paddr);
461
462 /* pixel memory location */
463 drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
464 top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
465 top_field->gam_gdp_pml += src_x * (bpp >> 3);
466 top_field->gam_gdp_pml += src_y * fb->pitches[0];
467
468 /* input parameters */
469 top_field->gam_gdp_pmp = fb->pitches[0];
470 top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
471 clamp_val(src_w, 0, GAM_GDP_SIZE_MAX);
472
473 /* output parameters */
474 ydo = sti_vtg_get_line_number(*mode, dst_y);
475 yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
476 xdo = sti_vtg_get_pixel_number(*mode, dst_x);
477 xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
478 top_field->gam_gdp_vpo = (ydo << 16) | xdo;
479 top_field->gam_gdp_vps = (yds << 16) | xds;
480
481 /* Same content and chained together */
482 memcpy(btm_field, top_field, sizeof(*btm_field));
483 top_field->gam_gdp_nvn = list->btm_field_paddr;
484 btm_field->gam_gdp_nvn = list->top_field_paddr;
485
486 /* Interlaced mode */
487 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
488 btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
489 fb->pitches[0];
490
491 if (first_prepare) {
492 /* Register gdp callback */
493 if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ?
494 compo->vtg_main : compo->vtg_aux,
495 &gdp->vtg_field_nb, mixer->id)) {
496 DRM_ERROR("Cannot register VTG notifier\n");
497 return;
498 }
499
500 /* Set and enable gdp clock */
501 if (gdp->clk_pix) {
502 struct clk *clkp;
503 int rate = mode->clock * 1000;
504
505 /* Depending on the mixer used, the gdp pixel clock
506 * needs a different parent clock. */
507 if (mixer->id == STI_MIXER_MAIN)
508 clkp = gdp->clk_main_parent;
509 else
510 clkp = gdp->clk_aux_parent;
511
512 if (clkp)
513 clk_set_parent(gdp->clk_pix, clkp);
514
515 res = clk_set_rate(gdp->clk_pix, rate);
516 if (res < 0) {
517 DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
518 rate);
519 return;
520 }
521
522 if (clk_prepare_enable(gdp->clk_pix)) {
523 DRM_ERROR("Failed to prepare/enable gdp\n");
524 return;
525 }
526 }
527 }
528
529 /* Update the NVN field of the 'right' field of the current GDP node
530 * (being used by the HW) with the address of the updated ('free') top
531 * field GDP node.
532 * - In interlaced mode the 'right' field is the bottom field as we
533 * update frames starting from their top field
534 * - In progressive mode, we update both bottom and top fields which
535 * are equal nodes.
536 * At the next VSYNC, the updated node list will be used by the HW.
537 */
538 curr_list = sti_gdp_get_current_nodes(gdp);
539 dma_updated_top = list->top_field_paddr;
540 dma_updated_btm = list->btm_field_paddr;
541
542 dev_dbg(gdp->dev, "Current NVN:0x%X\n",
543 readl(gdp->regs + GAM_GDP_NVN_OFFSET));
544 dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
545 (unsigned long)cma_obj->paddr,
546 readl(gdp->regs + GAM_GDP_PML_OFFSET));
547
548 if (!curr_list) {
549 /* On a first update or an invalid node, write directly to the
550 * hw register */
551 DRM_DEBUG_DRIVER("%s first update (or invalid node)",
552 sti_plane_to_str(plane));
553
554 writel(gdp->is_curr_top ?
555 dma_updated_btm : dma_updated_top,
556 gdp->regs + GAM_GDP_NVN_OFFSET);
557 goto end;
558 }
559
560 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
561 if (gdp->is_curr_top) {
562 /* Do not update in the middle of the frame, but
563 * postpone the update until after the bottom field
564 * has been displayed */
565 curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
566 } else {
567 /* Direct update to avoid one frame delay */
568 writel(dma_updated_top,
569 gdp->regs + GAM_GDP_NVN_OFFSET);
570 }
571 } else {
572 /* Direct update for progressive to avoid one frame delay */
573 writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);
574 }
575
576end:
577 plane->status = STI_PLANE_UPDATED;
578}
579
580static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
581 struct drm_plane_state *oldstate)
582{
583 struct sti_plane *plane = to_sti_plane(drm_plane);
584 struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
585
586 if (!drm_plane->crtc) {
587 DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
588 drm_plane->base.id);
589 return;
590 }
591
592 DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
593 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
594 drm_plane->base.id, sti_plane_to_str(plane));
595
596 plane->status = STI_PLANE_DISABLING;
597}
598
599static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
600 .atomic_update = sti_gdp_atomic_update,
601 .atomic_disable = sti_gdp_atomic_disable,
568}; 602};
569 603
570struct sti_layer *sti_gdp_create(struct device *dev, int id) 604struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
605 struct device *dev, int desc,
606 void __iomem *baseaddr,
607 unsigned int possible_crtcs,
608 enum drm_plane_type type)
571{ 609{
572 struct sti_gdp *gdp; 610 struct sti_gdp *gdp;
611 int res;
573 612
574 gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL); 613 gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
575 if (!gdp) { 614 if (!gdp) {
@@ -577,8 +616,33 @@ struct sti_layer *sti_gdp_create(struct device *dev, int id)
577 return NULL; 616 return NULL;
578 } 617 }
579 618
580 gdp->layer.ops = &gdp_ops; 619 gdp->dev = dev;
620 gdp->regs = baseaddr;
621 gdp->plane.desc = desc;
622 gdp->plane.status = STI_PLANE_DISABLED;
623
581 gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb; 624 gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
582 625
583 return (struct sti_layer *)gdp; 626 sti_gdp_init(gdp);
627
628 res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
629 possible_crtcs,
630 &sti_plane_helpers_funcs,
631 gdp_supported_formats,
632 ARRAY_SIZE(gdp_supported_formats),
633 type);
634 if (res) {
635 DRM_ERROR("Failed to initialize universal plane\n");
636 goto err;
637 }
638
639 drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs);
640
641 sti_plane_init_property(&gdp->plane, type);
642
643 return &gdp->plane.drm_plane;
644
645err:
646 devm_kfree(dev, gdp);
647 return NULL;
584} 648}
diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h
index 1dab68274ad3..73947a4a8004 100644
--- a/drivers/gpu/drm/sti/sti_gdp.h
+++ b/drivers/gpu/drm/sti/sti_gdp.h
@@ -11,6 +11,9 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13 13
14struct sti_layer *sti_gdp_create(struct device *dev, int id); 14struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
15 15 struct device *dev, int desc,
16 void __iomem *baseaddr,
17 unsigned int possible_crtcs,
18 enum drm_plane_type type);
16#endif 19#endif
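The widened sti_gdp_create() signature now hands the plane everything it used to pull out of struct sti_layer. A hypothetical call site, for illustration only (the real caller lives in sti_compositor.c, outside this diff; GDP0_OFFSET and the CRTC mask are assumptions):

	struct drm_plane *plane;

	plane = sti_gdp_create(drm_dev, dev, STI_GDP_0,
			       compo->regs + GDP0_OFFSET,  /* assumed register base */
			       BIT(0),                     /* CRTC 0 only */
			       DRM_PLANE_TYPE_PRIMARY);
	if (!plane)
		DRM_ERROR("Can't create GDP plane\n");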
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index f28a4d54487c..09e29e43423e 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -588,7 +588,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
588 return count; 588 return count;
589 589
590fail: 590fail:
591 DRM_ERROR("Can not read HDMI EDID\n"); 591 DRM_ERROR("Can't read HDMI EDID\n");
592 return 0; 592 return 0;
593} 593}
594 594
@@ -693,21 +693,8 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
693 struct sti_hdmi_connector *connector; 693 struct sti_hdmi_connector *connector;
694 struct drm_connector *drm_connector; 694 struct drm_connector *drm_connector;
695 struct drm_bridge *bridge; 695 struct drm_bridge *bridge;
696 struct device_node *ddc;
697 int err; 696 int err;
698 697
699 ddc = of_parse_phandle(dev->of_node, "ddc", 0);
700 if (ddc) {
701 hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
702 if (!hdmi->ddc_adapt) {
703 err = -EPROBE_DEFER;
704 of_node_put(ddc);
705 return err;
706 }
707
708 of_node_put(ddc);
709 }
710
711 /* Set the drm device handle */ 698 /* Set the drm device handle */
712 hdmi->drm_dev = drm_dev; 699 hdmi->drm_dev = drm_dev;
713 700
@@ -796,6 +783,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
796 struct sti_hdmi *hdmi; 783 struct sti_hdmi *hdmi;
797 struct device_node *np = dev->of_node; 784 struct device_node *np = dev->of_node;
798 struct resource *res; 785 struct resource *res;
786 struct device_node *ddc;
799 int ret; 787 int ret;
800 788
801 DRM_INFO("%s\n", __func__); 789 DRM_INFO("%s\n", __func__);
@@ -804,6 +792,17 @@ static int sti_hdmi_probe(struct platform_device *pdev)
804 if (!hdmi) 792 if (!hdmi)
805 return -ENOMEM; 793 return -ENOMEM;
806 794
795 ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0);
796 if (ddc) {
797 hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
798 if (!hdmi->ddc_adapt) {
799 of_node_put(ddc);
800 return -EPROBE_DEFER;
801 }
802
803 of_node_put(ddc);
804 }
805
807 hdmi->dev = pdev->dev; 806 hdmi->dev = pdev->dev;
808 807
809 /* Get resources */ 808 /* Get resources */
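Moving the DDC lookup from bind() to probe() lets a missing I2C adapter defer the whole platform probe, instead of surfacing -EPROBE_DEFER from inside the component bind, where it is awkward to honour. Reduced to its core (mirroring the hunk above, with the of_node_put() hoisted and other error paths trimmed):

	struct device_node *ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0);

	if (ddc) {
		hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
		of_node_put(ddc);              /* drop the phandle ref either way */
		if (!hdmi->ddc_adapt)
			return -EPROBE_DEFER;  /* I2C bus not bound yet, retry */
	}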
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index b0eb62de1b2e..7c8f9b8bfae1 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -12,11 +12,12 @@
12#include <linux/reset.h> 12#include <linux/reset.h>
13 13
14#include <drm/drmP.h> 14#include <drm/drmP.h>
15#include <drm/drm_fb_cma_helper.h>
16#include <drm/drm_gem_cma_helper.h>
15 17
16#include "sti_drm_plane.h" 18#include "sti_compositor.h"
17#include "sti_hqvdp.h"
18#include "sti_hqvdp_lut.h" 19#include "sti_hqvdp_lut.h"
19#include "sti_layer.h" 20#include "sti_plane.h"
20#include "sti_vtg.h" 21#include "sti_vtg.h"
21 22
22/* Firmware name */ 23/* Firmware name */
@@ -322,8 +323,7 @@ struct sti_hqvdp_cmd {
322 * @dev: driver device 323 * @dev: driver device
323 * @drm_dev: the drm device 324 * @drm_dev: the drm device
324 * @regs: registers 325 * @regs: registers
325 * @layer: layer structure for hqvdp itself 326 * @plane: plane structure for hqvdp itself
326 * @vid_plane: VID plug used as link with compositor IP
327 * @clk: IP clock 327 * @clk: IP clock
328 * @clk_pix_main: pix main clock 328 * @clk_pix_main: pix main clock
329 * @reset: reset control 329 * @reset: reset control
@@ -334,13 +334,13 @@ struct sti_hqvdp_cmd {
334 * @hqvdp_cmd: buffer of commands 334 * @hqvdp_cmd: buffer of commands
335 * @hqvdp_cmd_paddr: physical address of hqvdp_cmd 335 * @hqvdp_cmd_paddr: physical address of hqvdp_cmd
336 * @vtg: vtg for main data path 336 * @vtg: vtg for main data path
337 * @xp70_initialized: true if xp70 is already initialized
337 */ 338 */
338struct sti_hqvdp { 339struct sti_hqvdp {
339 struct device *dev; 340 struct device *dev;
340 struct drm_device *drm_dev; 341 struct drm_device *drm_dev;
341 void __iomem *regs; 342 void __iomem *regs;
342 struct sti_layer layer; 343 struct sti_plane plane;
343 struct drm_plane *vid_plane;
344 struct clk *clk; 344 struct clk *clk;
345 struct clk *clk_pix_main; 345 struct clk *clk_pix_main;
346 struct reset_control *reset; 346 struct reset_control *reset;
@@ -351,24 +351,15 @@ struct sti_hqvdp {
351 void *hqvdp_cmd; 351 void *hqvdp_cmd;
352 dma_addr_t hqvdp_cmd_paddr; 352 dma_addr_t hqvdp_cmd_paddr;
353 struct sti_vtg *vtg; 353 struct sti_vtg *vtg;
354 bool xp70_initialized;
354}; 355};
355 356
356#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer) 357#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane)
357 358
358static const uint32_t hqvdp_supported_formats[] = { 359static const uint32_t hqvdp_supported_formats[] = {
359 DRM_FORMAT_NV12, 360 DRM_FORMAT_NV12,
360}; 361};
361 362
362static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer)
363{
364 return hqvdp_supported_formats;
365}
366
367static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer)
368{
369 return ARRAY_SIZE(hqvdp_supported_formats);
370}
371
372/** 363/**
373 * sti_hqvdp_get_free_cmd 364 * sti_hqvdp_get_free_cmd
374 * @hqvdp: hqvdp structure 365 * @hqvdp: hqvdp structure
@@ -484,7 +475,12 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
484 475
485/** 476/**
486 * sti_hqvdp_check_hw_scaling 477 * sti_hqvdp_check_hw_scaling
487 * @layer: hqvdp layer 478 * @hqvdp: hqvdp pointer
479 * @mode: display mode with timing constraints
480 * @src_w: source width
481 * @src_h: source height
482 * @dst_w: destination width
483 * @dst_h: destination height
488 * 484 *
489 * Check if the HW is able to perform the scaling request 485 * Check if the HW is able to perform the scaling request
490 * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where: 486 * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
@@ -498,184 +494,36 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
498 * RETURNS: 494 * RETURNS:
499 * True if the HW can scale. 495 * True if the HW can scale.
500 */ 496 */
501static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer) 497static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp,
498 struct drm_display_mode *mode,
499 int src_w, int src_h,
500 int dst_w, int dst_h)
502{ 501{
503 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
504 unsigned long lfw; 502 unsigned long lfw;
505 unsigned int inv_zy; 503 unsigned int inv_zy;
506 504
507 lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000); 505 lfw = mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
508 lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000; 506 lfw /= max(src_w, dst_w) * mode->clock / 1000;
509 507
510 inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h); 508 inv_zy = DIV_ROUND_UP(src_h, dst_h);
511 509
512 return (inv_zy <= lfw) ? true : false; 510 return (inv_zy <= lfw) ? true : false;
513} 511}
514 512
515/** 513/**
516 * sti_hqvdp_prepare_layer 514 * sti_hqvdp_disable
517 * @layer: hqvdp layer 515 * @hqvdp: hqvdp pointer
518 * @first_prepare: true if it is the first time this function is called
519 * 516 *
520 * Prepares a command for the firmware 517 * Disables the HQVDP plane
521 *
522 * RETURNS:
523 * 0 on success.
524 */ 518 */
525static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare) 519static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
526{
527 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
528 struct sti_hqvdp_cmd *cmd;
529 int scale_h, scale_v;
530 int cmd_offset;
531
532 dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
533
534 /* prepare and commit VID plane */
535 hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane,
536 layer->crtc, layer->fb,
537 layer->dst_x, layer->dst_y,
538 layer->dst_w, layer->dst_h,
539 layer->src_x, layer->src_y,
540 layer->src_w, layer->src_h);
541
542 cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
543 if (cmd_offset == -1) {
544 DRM_ERROR("No available hqvdp_cmd now\n");
545 return -EBUSY;
546 }
547 cmd = hqvdp->hqvdp_cmd + cmd_offset;
548
549 if (!sti_hqvdp_check_hw_scaling(layer)) {
550 DRM_ERROR("Scaling beyond HW capabilities\n");
551 return -EINVAL;
552 }
553
554 /* Static parameters, defaulting to progressive mode */
555 cmd->top.config = TOP_CONFIG_PROGRESSIVE;
556 cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
557 cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
558 cmd->csdi.config = CSDI_CONFIG_PROG;
559
560 /* VC1RE, FMD bypassed: keep everything set to 0
561 * IQI/P2I bypassed */
562 cmd->iqi.config = IQI_CONFIG_DFLT;
563 cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
564 cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
565 cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
566
567 /* Buffer planes address */
568 cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0];
569 cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1];
570
571 /* Pitches */
572 cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch =
573 layer->pitches[0];
574 cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch =
575 layer->pitches[1];
576
577 /* Input / output size
578 * Align to upper even value */
579 layer->dst_w = ALIGN(layer->dst_w, 2);
580 layer->dst_h = ALIGN(layer->dst_h, 2);
581
582 if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) ||
583 (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) ||
584 (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) ||
585 (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) {
586 DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
587 layer->src_w, layer->src_h,
588 layer->dst_w, layer->dst_h);
589 return -EINVAL;
590 }
591 cmd->top.input_viewport_size = cmd->top.input_frame_size =
592 layer->src_h << 16 | layer->src_w;
593 cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w;
594 cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x;
595
596 /* Handle interlaced */
597 if (layer->fb->flags & DRM_MODE_FB_INTERLACED) {
598 /* Top field to display */
599 cmd->top.config = TOP_CONFIG_INTER_TOP;
600
601 /* Update pitches and vert size */
602 cmd->top.input_frame_size = (layer->src_h / 2) << 16 |
603 layer->src_w;
604 cmd->top.luma_processed_pitch *= 2;
605 cmd->top.luma_src_pitch *= 2;
606 cmd->top.chroma_processed_pitch *= 2;
607 cmd->top.chroma_src_pitch *= 2;
608
609 /* Enable directional deinterlacing processing */
610 cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
611 cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
612 cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
613 }
614
615 /* Update hvsrc lut coef */
616 scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w;
617 sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
618
619 scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h;
620 sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
621
622 if (first_prepare) {
623 /* Prevent VTG shutdown */
624 if (clk_prepare_enable(hqvdp->clk_pix_main)) {
625 DRM_ERROR("Failed to prepare/enable pix main clk\n");
626 return -ENXIO;
627 }
628
629 /* Register VTG Vsync callback to handle bottom fields */
630 if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
631 sti_vtg_register_client(hqvdp->vtg,
632 &hqvdp->vtg_nb, layer->mixer_id)) {
633 DRM_ERROR("Cannot register VTG notifier\n");
634 return -ENXIO;
635 }
636 }
637
638 return 0;
639}
640
641static int sti_hqvdp_commit_layer(struct sti_layer *layer)
642{ 520{
643 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
644 int cmd_offset;
645
646 dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
647
648 cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
649 if (cmd_offset == -1) {
650 DRM_ERROR("No available hqvdp_cmd now\n");
651 return -EBUSY;
652 }
653
654 writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
655 hqvdp->regs + HQVDP_MBX_NEXT_CMD);
656
657 hqvdp->curr_field_count++;
658
659 /* Interlaced : get ready to display the bottom field at next Vsync */
660 if (layer->fb->flags & DRM_MODE_FB_INTERLACED)
661 hqvdp->btm_field_pending = true;
662
663 dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
664 __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
665
666 return 0;
667}
668
669static int sti_hqvdp_disable_layer(struct sti_layer *layer)
670{
671 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
672 int i; 521 int i;
673 522
674 DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); 523 DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&hqvdp->plane));
675 524
676 /* Unregister VTG Vsync callback */ 525 /* Unregister VTG Vsync callback */
677 if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) && 526 if (sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
678 sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
679 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); 527 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
680 528
681 /* Set next cmd to NULL */ 529 /* Set next cmd to NULL */
@@ -691,15 +539,10 @@ static int sti_hqvdp_disable_layer(struct sti_layer *layer)
691 /* VTG can stop now */ 539 /* VTG can stop now */
692 clk_disable_unprepare(hqvdp->clk_pix_main); 540 clk_disable_unprepare(hqvdp->clk_pix_main);
693 541
694 if (i == POLL_MAX_ATTEMPT) { 542 if (i == POLL_MAX_ATTEMPT)
695 DRM_ERROR("XP70 could not revert to idle\n"); 543 DRM_ERROR("XP70 could not revert to idle\n");
696 return -ENXIO;
697 }
698
699 /* disable VID plane */
700 hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane);
701 544
702 return 0; 545 hqvdp->plane.status = STI_PLANE_DISABLED;
703} 546}
704 547
705/** 548/**
@@ -724,6 +567,14 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
724 return 0; 567 return 0;
725 } 568 }
726 569
570 if (hqvdp->plane.status == STI_PLANE_FLUSHING) {
571 /* disable needs to be synchronized on the vsync event */
572 DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
573 sti_plane_to_str(&hqvdp->plane));
574
575 sti_hqvdp_disable(hqvdp);
576 }
577
727 if (hqvdp->btm_field_pending) { 578 if (hqvdp->btm_field_pending) {
728 /* Create the btm field command from the current one */ 579 /* Create the btm field command from the current one */
729 btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); 580 btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
@@ -758,32 +609,10 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
758 return 0; 609 return 0;
759} 610}
760 611
761static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id) 612static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
762{ 613{
763 struct drm_plane *plane;
764
765 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
766 struct sti_layer *layer = to_sti_layer(plane);
767
768 if (layer->desc == id)
769 return plane;
770 }
771
772 return NULL;
773}
774
775static void sti_hqvd_init(struct sti_layer *layer)
776{
777 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
778 int size; 614 int size;
779 615
780 /* find the plane matching vid 0 */
781 hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0);
782 if (!hqvdp->vid_plane) {
783 DRM_ERROR("Cannot find Main video layer\n");
784 return;
785 }
786
787 hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb; 616 hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
788 617
789 /* Allocate memory for the VDP commands */ 618 /* Allocate memory for the VDP commands */
@@ -799,24 +628,213 @@ static void sti_hqvd_init(struct sti_layer *layer)
799 memset(hqvdp->hqvdp_cmd, 0, size); 628 memset(hqvdp->hqvdp_cmd, 0, size);
800} 629}
801 630
802static const struct sti_layer_funcs hqvdp_ops = { 631static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
803 .get_formats = sti_hqvdp_get_formats, 632 struct drm_plane_state *oldstate)
804 .get_nb_formats = sti_hqvdp_get_nb_formats, 633{
805 .init = sti_hqvd_init, 634 struct drm_plane_state *state = drm_plane->state;
806 .prepare = sti_hqvdp_prepare_layer, 635 struct sti_plane *plane = to_sti_plane(drm_plane);
807 .commit = sti_hqvdp_commit_layer, 636 struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
808 .disable = sti_hqvdp_disable_layer, 637 struct drm_crtc *crtc = state->crtc;
638 struct sti_mixer *mixer = to_sti_mixer(crtc);
639 struct drm_framebuffer *fb = state->fb;
640 struct drm_display_mode *mode = &crtc->mode;
641 int dst_x = state->crtc_x;
642 int dst_y = state->crtc_y;
643 int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
644 int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
645 /* src_x are in 16.16 format */
646 int src_x = state->src_x >> 16;
647 int src_y = state->src_y >> 16;
648 int src_w = state->src_w >> 16;
649 int src_h = state->src_h >> 16;
650 bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
651 struct drm_gem_cma_object *cma_obj;
652 struct sti_hqvdp_cmd *cmd;
653 int scale_h, scale_v;
654 int cmd_offset;
655
656 DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
657 crtc->base.id, sti_mixer_to_str(mixer),
658 drm_plane->base.id, sti_plane_to_str(plane));
659 DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
660 sti_plane_to_str(plane),
661 dst_w, dst_h, dst_x, dst_y,
662 src_w, src_h, src_x, src_y);
663
664 cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
665 if (cmd_offset == -1) {
666 DRM_ERROR("No available hqvdp_cmd now\n");
667 return;
668 }
669 cmd = hqvdp->hqvdp_cmd + cmd_offset;
670
671 if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
672 src_w, src_h,
673 dst_w, dst_h)) {
674 DRM_ERROR("Scaling beyond HW capabilities\n");
675 return;
676 }
677
678 /* Static parameters, defaulting to progressive mode */
679 cmd->top.config = TOP_CONFIG_PROGRESSIVE;
680 cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
681 cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
682 cmd->csdi.config = CSDI_CONFIG_PROG;
683
684 /* VC1RE, FMD bypassed: keep everything set to 0
685 * IQI/P2I bypassed */
686 cmd->iqi.config = IQI_CONFIG_DFLT;
687 cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
688 cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
689 cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
690
691 cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
692 if (!cma_obj) {
693 DRM_ERROR("Can't get CMA GEM object for fb\n");
694 return;
695 }
696
697 DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
698 (char *)&fb->pixel_format,
699 (unsigned long)cma_obj->paddr);
700
701 /* Buffer planes address */
702 cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0];
703 cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1];
704
705 /* Pitches */
706 cmd->top.luma_processed_pitch = fb->pitches[0];
707 cmd->top.luma_src_pitch = fb->pitches[0];
708 cmd->top.chroma_processed_pitch = fb->pitches[1];
709 cmd->top.chroma_src_pitch = fb->pitches[1];
710
711 /* Input / output size
712 * Align to upper even value */
713 dst_w = ALIGN(dst_w, 2);
714 dst_h = ALIGN(dst_h, 2);
715
716 if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
717 (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
718 (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
719 (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
720 DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
721 src_w, src_h,
722 dst_w, dst_h);
723 return;
724 }
725
726 cmd->top.input_viewport_size = src_h << 16 | src_w;
727 cmd->top.input_frame_size = src_h << 16 | src_w;
728 cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
729 cmd->top.input_viewport_ori = src_y << 16 | src_x;
730
731 /* Handle interlaced */
732 if (fb->flags & DRM_MODE_FB_INTERLACED) {
733 /* Top field to display */
734 cmd->top.config = TOP_CONFIG_INTER_TOP;
735
736 /* Update pitches and vert size */
737 cmd->top.input_frame_size = (src_h / 2) << 16 | src_w;
738 cmd->top.luma_processed_pitch *= 2;
739 cmd->top.luma_src_pitch *= 2;
740 cmd->top.chroma_processed_pitch *= 2;
741 cmd->top.chroma_src_pitch *= 2;
742
743 /* Enable directional deinterlacing processing */
744 cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
745 cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
746 cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
747 }
748
749 /* Update hvsrc lut coef */
750 scale_h = SCALE_FACTOR * dst_w / src_w;
751 sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
752
753 scale_v = SCALE_FACTOR * dst_h / src_h;
754 sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
755
756 if (first_prepare) {
757 /* Prevent VTG shutdown */
758 if (clk_prepare_enable(hqvdp->clk_pix_main)) {
759 DRM_ERROR("Failed to prepare/enable pix main clk\n");
760 return;
761 }
762
763 /* Register VTG Vsync callback to handle bottom fields */
764 if (sti_vtg_register_client(hqvdp->vtg,
765 &hqvdp->vtg_nb,
766 mixer->id)) {
767 DRM_ERROR("Cannot register VTG notifier\n");
768 return;
769 }
770 }
771
772 writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
773 hqvdp->regs + HQVDP_MBX_NEXT_CMD);
774
775 hqvdp->curr_field_count++;
776
777 /* Interlaced: get ready to display the bottom field at next Vsync */
778 if (fb->flags & DRM_MODE_FB_INTERLACED)
779 hqvdp->btm_field_pending = true;
780
781 dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
782 __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
783
784 plane->status = STI_PLANE_UPDATED;
785}
786
787static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
788 struct drm_plane_state *oldstate)
789{
790 struct sti_plane *plane = to_sti_plane(drm_plane);
791 struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
792
793 if (!drm_plane->crtc) {
794 DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
795 drm_plane->base.id);
796 return;
797 }
798
799 DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
800 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
801 drm_plane->base.id, sti_plane_to_str(plane));
802
803 plane->status = STI_PLANE_DISABLING;
804}
805
806static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
807 .atomic_update = sti_hqvdp_atomic_update,
808 .atomic_disable = sti_hqvdp_atomic_disable,
809}; 809};
810 810
811struct sti_layer *sti_hqvdp_create(struct device *dev) 811static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
812 struct device *dev, int desc)
812{ 813{
813 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev); 814 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
815 int res;
816
817 hqvdp->plane.desc = desc;
818 hqvdp->plane.status = STI_PLANE_DISABLED;
814 819
815 hqvdp->layer.ops = &hqvdp_ops; 820 sti_hqvdp_init(hqvdp);
816 821
817 return &hqvdp->layer; 822 res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
823 &sti_plane_helpers_funcs,
824 hqvdp_supported_formats,
825 ARRAY_SIZE(hqvdp_supported_formats),
826 DRM_PLANE_TYPE_OVERLAY);
827 if (res) {
828 DRM_ERROR("Failed to initialize universal plane\n");
829 return NULL;
830 }
831
832 drm_plane_helper_add(&hqvdp->plane.drm_plane, &sti_hqvdp_helpers_funcs);
833
834 sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
835
836 return &hqvdp->plane.drm_plane;
818} 837}
819EXPORT_SYMBOL(sti_hqvdp_create);
820 838
821static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp) 839static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
822{ 840{
@@ -859,6 +877,12 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
859 } *header; 877 } *header;
860 878
861 DRM_DEBUG_DRIVER("\n"); 879 DRM_DEBUG_DRIVER("\n");
880
881 if (hqvdp->xp70_initialized) {
882 DRM_INFO("HQVDP XP70 already initialized\n");
883 return;
884 }
885
862 /* Check firmware parts */ 886 /* Check firmware parts */
863 if (!firmware) { 887 if (!firmware) {
864 DRM_ERROR("Firmware not available\n"); 888 DRM_ERROR("Firmware not available\n");
@@ -946,7 +970,10 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
946 /* Launch Vsync */ 970 /* Launch Vsync */
947 writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC); 971 writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
948 972
949 DRM_INFO("HQVDP XP70 started\n"); 973 DRM_INFO("HQVDP XP70 initialized\n");
974
975 hqvdp->xp70_initialized = true;
976
950out: 977out:
951 release_firmware(firmware); 978 release_firmware(firmware);
952} 979}
@@ -955,7 +982,7 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
955{ 982{
956 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev); 983 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
957 struct drm_device *drm_dev = data; 984 struct drm_device *drm_dev = data;
958 struct sti_layer *layer; 985 struct drm_plane *plane;
959 int err; 986 int err;
960 987
961 DRM_DEBUG_DRIVER("\n"); 988 DRM_DEBUG_DRIVER("\n");
@@ -971,13 +998,10 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
971 return err; 998 return err;
972 } 999 }
973 1000
974 layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs); 1001 /* Create HQVDP plane once xp70 is initialized */
975 if (!layer) { 1002 plane = sti_hqvdp_create(drm_dev, hqvdp->dev, STI_HQVDP_0);
1003 if (!plane)
976 DRM_ERROR("Can't create HQVDP plane\n"); 1004 DRM_ERROR("Can't create HQVDP plane\n");
977 return -ENOMEM;
978 }
979
980 sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY);
981 1005
982 return 0; 1006 return 0;
983} 1007}
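The firmware limit checked by sti_hqvdp_check_hw_scaling() is easier to see with numbers plugged in. Assuming an illustrative 400 MHz HQVDP clock and 1080p60 timings (htotal 2200, pixel clock 148500 kHz), a 1920x1080 to 960x270 downscale works out as follows, with integer division throughout, as in the code:

	unsigned long lfw = 2200 * (400000000 / 1000000); /* 2200 * 400 = 880000 */
	lfw /= 1920 * (148500 / 1000);                    /* 880000 / 284160 = 3 */
	unsigned int inv_zy = DIV_ROUND_UP(1080, 270);    /* ceil(1080/270) = 4  */
	/* inv_zy (4) > lfw (3): the 4x vertical downscale is rejected,
	 * whereas shrinking only to 1080 -> 360 (inv_zy = 3) would just pass. */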
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.h b/drivers/gpu/drm/sti/sti_hqvdp.h
deleted file mode 100644
index cd5ecd0a6dea..000000000000
--- a/drivers/gpu/drm/sti/sti_hqvdp.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_HQVDP_H_
8#define _STI_HQVDP_H_
9
10struct sti_layer *sti_hqvdp_create(struct device *dev);
11
12#endif
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
deleted file mode 100644
index 899104f9d4bc..000000000000
--- a/drivers/gpu/drm/sti/sti_layer.c
+++ /dev/null
@@ -1,213 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#include <drm/drmP.h>
10#include <drm/drm_gem_cma_helper.h>
11#include <drm/drm_fb_cma_helper.h>
12
13#include "sti_compositor.h"
14#include "sti_cursor.h"
15#include "sti_gdp.h"
16#include "sti_hqvdp.h"
17#include "sti_layer.h"
18#include "sti_vid.h"
19
20const char *sti_layer_to_str(struct sti_layer *layer)
21{
22 switch (layer->desc) {
23 case STI_GDP_0:
24 return "GDP0";
25 case STI_GDP_1:
26 return "GDP1";
27 case STI_GDP_2:
28 return "GDP2";
29 case STI_GDP_3:
30 return "GDP3";
31 case STI_VID_0:
32 return "VID0";
33 case STI_VID_1:
34 return "VID1";
35 case STI_CURSOR:
36 return "CURSOR";
37 case STI_HQVDP_0:
38 return "HQVDP0";
39 default:
40 return "<UNKNOWN LAYER>";
41 }
42}
43EXPORT_SYMBOL(sti_layer_to_str);
44
45struct sti_layer *sti_layer_create(struct device *dev, int desc,
46 void __iomem *baseaddr)
47{
48
49 struct sti_layer *layer = NULL;
50
51 switch (desc & STI_LAYER_TYPE_MASK) {
52 case STI_GDP:
53 layer = sti_gdp_create(dev, desc);
54 break;
55 case STI_VID:
56 layer = sti_vid_create(dev);
57 break;
58 case STI_CUR:
59 layer = sti_cursor_create(dev);
60 break;
61 case STI_VDP:
62 layer = sti_hqvdp_create(dev);
63 break;
64 }
65
66 if (!layer) {
67 DRM_ERROR("Failed to create layer\n");
68 return NULL;
69 }
70
71 layer->desc = desc;
72 layer->dev = dev;
73 layer->regs = baseaddr;
74
75 layer->ops->init(layer);
76
77 DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer));
78
79 return layer;
80}
81EXPORT_SYMBOL(sti_layer_create);
82
83int sti_layer_prepare(struct sti_layer *layer,
84 struct drm_crtc *crtc,
85 struct drm_framebuffer *fb,
86 struct drm_display_mode *mode, int mixer_id,
87 int dest_x, int dest_y, int dest_w, int dest_h,
88 int src_x, int src_y, int src_w, int src_h)
89{
90 int ret;
91 unsigned int i;
92 struct drm_gem_cma_object *cma_obj;
93
94 if (!layer || !fb || !mode) {
95 DRM_ERROR("Null fb, layer or mode\n");
96 return 1;
97 }
98
99 cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
100 if (!cma_obj) {
101 DRM_ERROR("Can't get CMA GEM object for fb\n");
102 return 1;
103 }
104
105 layer->crtc = crtc;
106 layer->fb = fb;
107 layer->mode = mode;
108 layer->mixer_id = mixer_id;
109 layer->dst_x = dest_x;
110 layer->dst_y = dest_y;
111 layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x);
112 layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y);
113 layer->src_x = src_x;
114 layer->src_y = src_y;
115 layer->src_w = src_w;
116 layer->src_h = src_h;
117 layer->format = fb->pixel_format;
118 layer->vaddr = cma_obj->vaddr;
119 layer->paddr = cma_obj->paddr;
120 for (i = 0; i < 4; i++) {
121 layer->pitches[i] = fb->pitches[i];
122 layer->offsets[i] = fb->offsets[i];
123 }
124
125 DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n",
126 sti_layer_to_str(layer),
127 layer->mixer_id);
128 DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
129 sti_layer_to_str(layer),
130 layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y,
131 layer->src_w, layer->src_h, layer->src_x,
132 layer->src_y);
133
134 DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
135 (char *)&layer->format, (unsigned long)layer->paddr);
136
137 if (!layer->ops->prepare)
138 goto err_no_prepare;
139
140 ret = layer->ops->prepare(layer, !layer->enabled);
141 if (!ret)
142 layer->enabled = true;
143
144 return ret;
145
146err_no_prepare:
147 DRM_ERROR("Cannot prepare\n");
148 return 1;
149}
150
151int sti_layer_commit(struct sti_layer *layer)
152{
153 if (!layer)
154 return 1;
155
156 if (!layer->ops->commit)
157 goto err_no_commit;
158
159 return layer->ops->commit(layer);
160
161err_no_commit:
162 DRM_ERROR("Cannot commit\n");
163 return 1;
164}
165
166int sti_layer_disable(struct sti_layer *layer)
167{
168 int ret;
169
170 DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
171 if (!layer)
172 return 1;
173
174 if (!layer->enabled)
175 return 0;
176
177 if (!layer->ops->disable)
178 goto err_no_disable;
179
180 ret = layer->ops->disable(layer);
181 if (!ret)
182 layer->enabled = false;
183 else
184 DRM_ERROR("Disable failed\n");
185
186 return ret;
187
188err_no_disable:
189 DRM_ERROR("Cannot disable\n");
190 return 1;
191}
192
193const uint32_t *sti_layer_get_formats(struct sti_layer *layer)
194{
195 if (!layer)
196 return NULL;
197
198 if (!layer->ops->get_formats)
199 return NULL;
200
201 return layer->ops->get_formats(layer);
202}
203
204unsigned int sti_layer_get_nb_formats(struct sti_layer *layer)
205{
206 if (!layer)
207 return 0;
208
209 if (!layer->ops->get_nb_formats)
210 return 0;
211
212 return layer->ops->get_nb_formats(layer);
213}
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
deleted file mode 100644
index ceff497f557e..000000000000
--- a/drivers/gpu/drm/sti/sti_layer.h
+++ /dev/null
@@ -1,131 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#ifndef _STI_LAYER_H_
10#define _STI_LAYER_H_
11
12#include <drm/drmP.h>
13
14#define to_sti_layer(x) container_of(x, struct sti_layer, plane)
15
16#define STI_LAYER_TYPE_SHIFT 8
17#define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1))
18
19struct sti_layer;
20
21enum sti_layer_type {
22 STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
23 STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
24 STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
25 STI_BCK = 4 << STI_LAYER_TYPE_SHIFT,
26 STI_VDP = 5 << STI_LAYER_TYPE_SHIFT
27};
28
29enum sti_layer_id_of_type {
30 STI_ID_0 = 0,
31 STI_ID_1 = 1,
32 STI_ID_2 = 2,
33 STI_ID_3 = 3
34};
35
36enum sti_layer_desc {
37 STI_GDP_0 = STI_GDP | STI_ID_0,
38 STI_GDP_1 = STI_GDP | STI_ID_1,
39 STI_GDP_2 = STI_GDP | STI_ID_2,
40 STI_GDP_3 = STI_GDP | STI_ID_3,
41 STI_VID_0 = STI_VID | STI_ID_0,
42 STI_VID_1 = STI_VID | STI_ID_1,
43 STI_HQVDP_0 = STI_VDP | STI_ID_0,
44 STI_CURSOR = STI_CUR,
45 STI_BACK = STI_BCK
46};
47
48/**
49 * STI layer functions structure
50 *
51 * @get_formats: get layer supported formats
52 * @get_nb_formats: get number of format supported
53 * @init: initialize the layer
54 * @prepare: prepare layer before rendering
55 * @commit: set layer for rendering
56 * @disable: disable layer
57 */
58struct sti_layer_funcs {
59 const uint32_t* (*get_formats)(struct sti_layer *layer);
60 unsigned int (*get_nb_formats)(struct sti_layer *layer);
61 void (*init)(struct sti_layer *layer);
62 int (*prepare)(struct sti_layer *layer, bool first_prepare);
63 int (*commit)(struct sti_layer *layer);
64 int (*disable)(struct sti_layer *layer);
65};
66
67/**
68 * STI layer structure
69 *
70 * @plane: drm plane it is bound to (if any)
71 * @fb: drm fb it is bound to
72 * @crtc: crtc it is bound to
73 * @mode: display mode
74 * @desc: layer type & id
75 * @device: driver device
76 * @regs: layer registers
77 * @ops: layer functions
78 * @zorder: layer z-order
79 * @mixer_id: id of the mixer used to display the layer
80 * @enabled: to know if the layer is active or not
81 * @src_x src_y: coordinates of the input (fb) area
82 * @src_w src_h: size of the input (fb) area
83 * @dst_x dst_y: coordinates of the output (crtc) area
84 * @dst_w dst_h: size of the output (crtc) area
85 * @format: format
86 * @pitches: pitch of 'planes' (eg: Y, U, V)
87 * @offsets: offset of 'planes'
88 * @vaddr: virtual address of the input buffer
89 * @paddr: physical address of the input buffer
90 */
91struct sti_layer {
92 struct drm_plane plane;
93 struct drm_framebuffer *fb;
94 struct drm_crtc *crtc;
95 struct drm_display_mode *mode;
96 enum sti_layer_desc desc;
97 struct device *dev;
98 void __iomem *regs;
99 const struct sti_layer_funcs *ops;
100 int zorder;
101 int mixer_id;
102 bool enabled;
103 int src_x, src_y;
104 int src_w, src_h;
105 int dst_x, dst_y;
106 int dst_w, dst_h;
107 uint32_t format;
108 unsigned int pitches[4];
109 unsigned int offsets[4];
110 void *vaddr;
111 dma_addr_t paddr;
112};
113
114struct sti_layer *sti_layer_create(struct device *dev, int desc,
115 void __iomem *baseaddr);
116int sti_layer_prepare(struct sti_layer *layer,
117 struct drm_crtc *crtc,
118 struct drm_framebuffer *fb,
119 struct drm_display_mode *mode,
120 int mixer_id,
121 int dest_x, int dest_y,
122 int dest_w, int dest_h,
123 int src_x, int src_y,
124 int src_w, int src_h);
125int sti_layer_commit(struct sti_layer *layer);
126int sti_layer_disable(struct sti_layer *layer);
127const uint32_t *sti_layer_get_formats(struct sti_layer *layer);
128unsigned int sti_layer_get_nb_formats(struct sti_layer *layer);
129const char *sti_layer_to_str(struct sti_layer *layer);
130
131#endif
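
The two files removed above dispatched layer operations through a sti_layer_funcs vtable; the plane rework replaces that indirection with direct drm_plane helpers. For reference, a minimal standalone sketch of the deleted dispatch pattern (names simplified):

#include <stdio.h>

struct layer;

struct layer_funcs {
	int (*prepare)(struct layer *l);
	int (*commit)(struct layer *l);
};

struct layer {
	const struct layer_funcs *ops;
	const char *name;
};

static int gdp_prepare(struct layer *l)
{
	printf("%s: prepare\n", l->name);
	return 0;
}

static int gdp_commit(struct layer *l)
{
	printf("%s: commit\n", l->name);
	return 0;
}

static const struct layer_funcs gdp_ops = {
	.prepare = gdp_prepare,
	.commit = gdp_commit,
};

int main(void)
{
	struct layer l = { .ops = &gdp_ops, .name = "GDP0" };

	if (l.ops->prepare)	/* guarded, as sti_layer_prepare() was */
		l.ops->prepare(&l);
	if (l.ops->commit)
		l.ops->commit(&l);
	return 0;
}
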
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 13a4b84deab6..0182e9365004 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -58,6 +58,7 @@ const char *sti_mixer_to_str(struct sti_mixer *mixer)
58 return "<UNKNOWN MIXER>"; 58 return "<UNKNOWN MIXER>";
59 } 59 }
60} 60}
61EXPORT_SYMBOL(sti_mixer_to_str);
61 62
62static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id) 63static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id)
63{ 64{
@@ -101,52 +102,57 @@ static void sti_mixer_set_background_area(struct sti_mixer *mixer,
101 sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds); 102 sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds);
102} 103}
103 104
104int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer) 105int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane)
105{ 106{
106 int layer_id = 0, depth = layer->zorder; 107 int plane_id, depth = plane->zorder;
108 unsigned int i;
107 u32 mask, val; 109 u32 mask, val;
108 110
109 if (depth >= GAM_MIXER_NB_DEPTH_LEVEL) 111 if ((depth < 1) || (depth > GAM_MIXER_NB_DEPTH_LEVEL))
110 return 1; 112 return 1;
111 113
112 switch (layer->desc) { 114 switch (plane->desc) {
113 case STI_GDP_0: 115 case STI_GDP_0:
114 layer_id = GAM_DEPTH_GDP0_ID; 116 plane_id = GAM_DEPTH_GDP0_ID;
115 break; 117 break;
116 case STI_GDP_1: 118 case STI_GDP_1:
117 layer_id = GAM_DEPTH_GDP1_ID; 119 plane_id = GAM_DEPTH_GDP1_ID;
118 break; 120 break;
119 case STI_GDP_2: 121 case STI_GDP_2:
120 layer_id = GAM_DEPTH_GDP2_ID; 122 plane_id = GAM_DEPTH_GDP2_ID;
121 break; 123 break;
122 case STI_GDP_3: 124 case STI_GDP_3:
123 layer_id = GAM_DEPTH_GDP3_ID; 125 plane_id = GAM_DEPTH_GDP3_ID;
124 break; 126 break;
125 case STI_VID_0:
126 case STI_HQVDP_0: 127 case STI_HQVDP_0:
127 layer_id = GAM_DEPTH_VID0_ID; 128 plane_id = GAM_DEPTH_VID0_ID;
128 break;
129 case STI_VID_1:
130 layer_id = GAM_DEPTH_VID1_ID;
131 break; 129 break;
132 case STI_CURSOR: 130 case STI_CURSOR:
133 /* no need to set depth for cursor */ 131 /* no need to set depth for cursor */
134 return 0; 132 return 0;
135 default: 133 default:
136 DRM_ERROR("Unknown layer %d\n", layer->desc); 134 DRM_ERROR("Unknown plane %d\n", plane->desc);
137 return 1; 135 return 1;
138 } 136 }
139 mask = GAM_DEPTH_MASK_ID << (3 * depth); 137
140 layer_id = layer_id << (3 * depth); 138 /* Search if a previous depth was already assigned to the plane */
139 val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
140 for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) {
141 mask = GAM_DEPTH_MASK_ID << (3 * i);
142 if ((val & mask) == plane_id << (3 * i))
143 break;
144 }
145
146 mask |= GAM_DEPTH_MASK_ID << (3 * (depth - 1));
147 plane_id = plane_id << (3 * (depth - 1));
141 148
142 DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer), 149 DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
143 sti_layer_to_str(layer), depth); 150 sti_plane_to_str(plane), depth);
144 dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n", 151 dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n",
145 layer_id, mask); 152 plane_id, mask);
146 153
147 val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
148 val &= ~mask; 154 val &= ~mask;
149 val |= layer_id; 155 val |= plane_id;
150 sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val); 156 sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val);
151 157
152 dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n", 158 dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n",
@@ -176,9 +182,9 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
176 return 0; 182 return 0;
177} 183}
178 184
179static u32 sti_mixer_get_layer_mask(struct sti_layer *layer) 185static u32 sti_mixer_get_plane_mask(struct sti_plane *plane)
180{ 186{
181 switch (layer->desc) { 187 switch (plane->desc) {
182 case STI_BACK: 188 case STI_BACK:
183 return GAM_CTL_BACK_MASK; 189 return GAM_CTL_BACK_MASK;
184 case STI_GDP_0: 190 case STI_GDP_0:
@@ -189,11 +195,8 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
189 return GAM_CTL_GDP2_MASK; 195 return GAM_CTL_GDP2_MASK;
190 case STI_GDP_3: 196 case STI_GDP_3:
191 return GAM_CTL_GDP3_MASK; 197 return GAM_CTL_GDP3_MASK;
192 case STI_VID_0:
193 case STI_HQVDP_0: 198 case STI_HQVDP_0:
194 return GAM_CTL_VID0_MASK; 199 return GAM_CTL_VID0_MASK;
195 case STI_VID_1:
196 return GAM_CTL_VID1_MASK;
197 case STI_CURSOR: 200 case STI_CURSOR:
198 return GAM_CTL_CURSOR_MASK; 201 return GAM_CTL_CURSOR_MASK;
199 default: 202 default:
@@ -201,17 +204,17 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
201 } 204 }
202} 205}
203 206
204int sti_mixer_set_layer_status(struct sti_mixer *mixer, 207int sti_mixer_set_plane_status(struct sti_mixer *mixer,
205 struct sti_layer *layer, bool status) 208 struct sti_plane *plane, bool status)
206{ 209{
207 u32 mask, val; 210 u32 mask, val;
208 211
209 DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable", 212 DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable",
210 sti_mixer_to_str(mixer), sti_layer_to_str(layer)); 213 sti_mixer_to_str(mixer), sti_plane_to_str(plane));
211 214
212 mask = sti_mixer_get_layer_mask(layer); 215 mask = sti_mixer_get_plane_mask(plane);
213 if (!mask) { 216 if (!mask) {
 214 DRM_ERROR("Can not find layer mask\n"); 217 DRM_ERROR("Can't find plane mask\n");
215 return -EINVAL; 218 return -EINVAL;
216 } 219 }
217 220
@@ -223,15 +226,6 @@ int sti_mixer_set_layer_status(struct sti_mixer *mixer,
223 return 0; 226 return 0;
224} 227}
225 228
226void sti_mixer_clear_all_layers(struct sti_mixer *mixer)
227{
228 u32 val;
229
230 DRM_DEBUG_DRIVER("%s clear all layer\n", sti_mixer_to_str(mixer));
231 val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000;
232 sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
233}
234
235void sti_mixer_set_matrix(struct sti_mixer *mixer) 229void sti_mixer_set_matrix(struct sti_mixer *mixer)
236{ 230{
237 unsigned int i; 231 unsigned int i;
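
sti_mixer_set_plane_depth() above packs one 3-bit plane id per depth level into the GAM_MIXER_CRB crossbar register, first clearing any slot the plane already occupied so a plane never appears at two depths. A standalone sketch of that bit manipulation (values illustrative, ids assumed non-zero):

#include <stdint.h>
#include <stdio.h>

#define DEPTH_MASK_ID	7	/* 3 bits per depth level */
#define NB_DEPTH_LEVEL	6

static uint32_t set_depth(uint32_t crb, uint32_t plane_id, unsigned int depth)
{
	uint32_t mask = DEPTH_MASK_ID << (3 * (depth - 1));
	unsigned int i;

	/* clear a slot this plane may already occupy at another depth */
	for (i = 0; i < NB_DEPTH_LEVEL; i++)
		if (((crb >> (3 * i)) & DEPTH_MASK_ID) == plane_id)
			mask |= DEPTH_MASK_ID << (3 * i);

	crb &= ~mask;
	return crb | plane_id << (3 * (depth - 1));
}

int main(void)
{
	uint32_t crb = 0;

	crb = set_depth(crb, 2, 1);	/* plane id 2 at depth 1 */
	crb = set_depth(crb, 2, 4);	/* move it to depth 4 */
	printf("CRB = 0x%08x\n", crb);	/* old slot cleared, new slot set */
	return 0;
}
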
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index b97282182908..efb1a9a5ba86 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -11,10 +11,16 @@
11 11
12#include <drm/drmP.h> 12#include <drm/drmP.h>
13 13
14#include "sti_layer.h" 14#include "sti_plane.h"
15 15
16#define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc) 16#define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc)
17 17
18enum sti_mixer_status {
19 STI_MIXER_READY,
20 STI_MIXER_DISABLING,
21 STI_MIXER_DISABLED,
22};
23
18/** 24/**
19 * STI Mixer subdevice structure 25 * STI Mixer subdevice structure
20 * 26 *
@@ -23,33 +29,32 @@
23 * @id: id of the mixer 29 * @id: id of the mixer
24 * @drm_crtc: crtc object link to the mixer 30 * @drm_crtc: crtc object link to the mixer
25 * @pending_event: set if a flip event is pending on crtc 31 * @pending_event: set if a flip event is pending on crtc
 26 * @enabled: to know if the mixer is active or not 32 * @status: current status of the mixer
27 */ 33 */
28struct sti_mixer { 34struct sti_mixer {
29 struct device *dev; 35 struct device *dev;
30 void __iomem *regs; 36 void __iomem *regs;
31 int id; 37 int id;
32 struct drm_crtc drm_crtc; 38 struct drm_crtc drm_crtc;
33 struct drm_pending_vblank_event *pending_event; 39 struct drm_pending_vblank_event *pending_event;
34 bool enabled; 40 enum sti_mixer_status status;
35}; 41};
36 42
37const char *sti_mixer_to_str(struct sti_mixer *mixer); 43const char *sti_mixer_to_str(struct sti_mixer *mixer);
38 44
39struct sti_mixer *sti_mixer_create(struct device *dev, int id, 45struct sti_mixer *sti_mixer_create(struct device *dev, int id,
40 void __iomem *baseaddr); 46 void __iomem *baseaddr);
41 47
42int sti_mixer_set_layer_status(struct sti_mixer *mixer, 48int sti_mixer_set_plane_status(struct sti_mixer *mixer,
43 struct sti_layer *layer, bool status); 49 struct sti_plane *plane, bool status);
44void sti_mixer_clear_all_layers(struct sti_mixer *mixer); 50int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane);
45int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
46int sti_mixer_active_video_area(struct sti_mixer *mixer, 51int sti_mixer_active_video_area(struct sti_mixer *mixer,
47 struct drm_display_mode *mode); 52 struct drm_display_mode *mode);
48 53
49void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable); 54void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
50 55
51/* depth in Cross-bar control = z order */ 56/* depth in Cross-bar control = z order */
52#define GAM_MIXER_NB_DEPTH_LEVEL 7 57#define GAM_MIXER_NB_DEPTH_LEVEL 6
53 58
54#define STI_MIXER_MAIN 0 59#define STI_MIXER_MAIN 0
55#define STI_MIXER_AUX 1 60#define STI_MIXER_AUX 1
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
new file mode 100644
index 000000000000..d5c5e91f2956
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -0,0 +1,122 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#include <drm/drmP.h>
10#include <drm/drm_fb_cma_helper.h>
11#include <drm/drm_gem_cma_helper.h>
12
13#include "sti_compositor.h"
14#include "sti_drv.h"
15#include "sti_plane.h"
16
 17/* (Background) < GDP0 < GDP1 < HQVDP0 < GDP2 < GDP3 < (Foreground) */
18enum sti_plane_desc sti_plane_default_zorder[] = {
19 STI_GDP_0,
20 STI_GDP_1,
21 STI_HQVDP_0,
22 STI_GDP_2,
23 STI_GDP_3,
24};
25
26const char *sti_plane_to_str(struct sti_plane *plane)
27{
28 switch (plane->desc) {
29 case STI_GDP_0:
30 return "GDP0";
31 case STI_GDP_1:
32 return "GDP1";
33 case STI_GDP_2:
34 return "GDP2";
35 case STI_GDP_3:
36 return "GDP3";
37 case STI_HQVDP_0:
38 return "HQVDP0";
39 case STI_CURSOR:
40 return "CURSOR";
41 default:
42 return "<UNKNOWN PLANE>";
43 }
44}
45EXPORT_SYMBOL(sti_plane_to_str);
46
47static void sti_plane_destroy(struct drm_plane *drm_plane)
48{
49 DRM_DEBUG_DRIVER("\n");
50
51 drm_plane_helper_disable(drm_plane);
52 drm_plane_cleanup(drm_plane);
53}
54
55static int sti_plane_set_property(struct drm_plane *drm_plane,
56 struct drm_property *property,
57 uint64_t val)
58{
59 struct drm_device *dev = drm_plane->dev;
60 struct sti_private *private = dev->dev_private;
61 struct sti_plane *plane = to_sti_plane(drm_plane);
62
63 DRM_DEBUG_DRIVER("\n");
64
65 if (property == private->plane_zorder_property) {
66 plane->zorder = val;
67 return 0;
68 }
69
70 return -EINVAL;
71}
72
73static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane)
74{
75 struct drm_device *dev = drm_plane->dev;
76 struct sti_private *private = dev->dev_private;
77 struct sti_plane *plane = to_sti_plane(drm_plane);
78 struct drm_property *prop;
79
80 prop = private->plane_zorder_property;
81 if (!prop) {
82 prop = drm_property_create_range(dev, 0, "zpos", 1,
83 GAM_MIXER_NB_DEPTH_LEVEL);
84 if (!prop)
85 return;
86
87 private->plane_zorder_property = prop;
88 }
89
90 drm_object_attach_property(&drm_plane->base, prop, plane->zorder);
91}
92
93void sti_plane_init_property(struct sti_plane *plane,
94 enum drm_plane_type type)
95{
96 unsigned int i;
97
98 for (i = 0; i < ARRAY_SIZE(sti_plane_default_zorder); i++)
99 if (sti_plane_default_zorder[i] == plane->desc)
100 break;
101
102 plane->zorder = i + 1;
103
104 if (type == DRM_PLANE_TYPE_OVERLAY)
105 sti_plane_attach_zorder_property(&plane->drm_plane);
106
107 DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%d\n",
108 plane->drm_plane.base.id,
109 sti_plane_to_str(plane), plane->zorder);
110}
111EXPORT_SYMBOL(sti_plane_init_property);
112
113struct drm_plane_funcs sti_plane_helpers_funcs = {
114 .update_plane = drm_atomic_helper_update_plane,
115 .disable_plane = drm_atomic_helper_disable_plane,
116 .destroy = sti_plane_destroy,
117 .set_property = sti_plane_set_property,
118 .reset = drm_atomic_helper_plane_reset,
119 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
120 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
121};
122EXPORT_SYMBOL(sti_plane_helpers_funcs);
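
sti_plane_init_property() above derives a plane's default zorder from its position in sti_plane_default_zorder[], one-based so the background keeps the bottom slot. A standalone sketch of that lookup (enumerators simplified):

#include <stdio.h>

enum desc { GDP_0, GDP_1, HQVDP_0, GDP_2, GDP_3 };

static const enum desc default_zorder[] = {
	GDP_0, GDP_1, HQVDP_0, GDP_2, GDP_3,
};

static unsigned int default_zorder_of(enum desc d)
{
	unsigned int i;

	for (i = 0; i < sizeof(default_zorder) / sizeof(default_zorder[0]); i++)
		if (default_zorder[i] == d)
			break;
	return i + 1;	/* zorder is 1-based */
}

int main(void)
{
	printf("HQVDP0 default zorder = %u\n", default_zorder_of(HQVDP_0));
	return 0;
}
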
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
new file mode 100644
index 000000000000..86f1e6fc81b9
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -0,0 +1,71 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_PLANE_H_
8#define _STI_PLANE_H_
9
10#include <drm/drmP.h>
11#include <drm/drm_atomic_helper.h>
12#include <drm/drm_plane_helper.h>
13
14extern struct drm_plane_funcs sti_plane_helpers_funcs;
15
16#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
17
18#define STI_PLANE_TYPE_SHIFT 8
19#define STI_PLANE_TYPE_MASK (~((1 << STI_PLANE_TYPE_SHIFT) - 1))
20
21enum sti_plane_type {
22 STI_GDP = 1 << STI_PLANE_TYPE_SHIFT,
23 STI_VDP = 2 << STI_PLANE_TYPE_SHIFT,
24 STI_CUR = 3 << STI_PLANE_TYPE_SHIFT,
25 STI_BCK = 4 << STI_PLANE_TYPE_SHIFT
26};
27
28enum sti_plane_id_of_type {
29 STI_ID_0 = 0,
30 STI_ID_1 = 1,
31 STI_ID_2 = 2,
32 STI_ID_3 = 3
33};
34
35enum sti_plane_desc {
36 STI_GDP_0 = STI_GDP | STI_ID_0,
37 STI_GDP_1 = STI_GDP | STI_ID_1,
38 STI_GDP_2 = STI_GDP | STI_ID_2,
39 STI_GDP_3 = STI_GDP | STI_ID_3,
40 STI_HQVDP_0 = STI_VDP | STI_ID_0,
41 STI_CURSOR = STI_CUR,
42 STI_BACK = STI_BCK
43};
44
45enum sti_plane_status {
46 STI_PLANE_READY,
47 STI_PLANE_UPDATED,
48 STI_PLANE_DISABLING,
49 STI_PLANE_FLUSHING,
50 STI_PLANE_DISABLED,
51};
52
53/**
54 * STI plane structure
55 *
56 * @plane: drm plane it is bound to (if any)
57 * @desc: plane type & id
 58 * @status: current status of the plane
59 * @zorder: plane z-order
60 */
61struct sti_plane {
62 struct drm_plane drm_plane;
63 enum sti_plane_desc desc;
64 enum sti_plane_status status;
65 int zorder;
66};
67
68const char *sti_plane_to_str(struct sti_plane *plane);
69void sti_plane_init_property(struct sti_plane *plane,
70 enum drm_plane_type type);
71#endif
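
The descriptor enums in sti_plane.h encode the plane type in the bits at or above STI_PLANE_TYPE_SHIFT and the instance id below it, so one integer carries both. A standalone sketch of the split (mask derivation copied from the header, names shortened):

#include <stdio.h>

#define TYPE_SHIFT	8
#define TYPE_MASK	(~((1 << TYPE_SHIFT) - 1))

enum type { GDP = 1 << TYPE_SHIFT, VDP = 2 << TYPE_SHIFT };

int main(void)
{
	int desc = GDP | 3;	/* STI_GDP_3 */

	printf("type = 0x%x, id = %d\n", desc & TYPE_MASK, desc & ~TYPE_MASK);
	return 0;
}
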
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 5cc53116508e..c1aac8e66fb5 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -16,7 +16,7 @@
16#include <drm/drmP.h> 16#include <drm/drmP.h>
17#include <drm/drm_crtc_helper.h> 17#include <drm/drm_crtc_helper.h>
18 18
19#include "sti_drm_crtc.h" 19#include "sti_crtc.h"
20 20
21/* glue registers */ 21/* glue registers */
22#define TVO_CSC_MAIN_M0 0x000 22#define TVO_CSC_MAIN_M0 0x000
@@ -473,7 +473,7 @@ static void sti_dvo_encoder_commit(struct drm_encoder *encoder)
473{ 473{
474 struct sti_tvout *tvout = to_sti_tvout(encoder); 474 struct sti_tvout *tvout = to_sti_tvout(encoder);
475 475
476 tvout_dvo_start(tvout, sti_drm_crtc_is_main(encoder->crtc)); 476 tvout_dvo_start(tvout, sti_crtc_is_main(encoder->crtc));
477} 477}
478 478
479static void sti_dvo_encoder_disable(struct drm_encoder *encoder) 479static void sti_dvo_encoder_disable(struct drm_encoder *encoder)
@@ -523,7 +523,7 @@ static void sti_hda_encoder_commit(struct drm_encoder *encoder)
523{ 523{
524 struct sti_tvout *tvout = to_sti_tvout(encoder); 524 struct sti_tvout *tvout = to_sti_tvout(encoder);
525 525
526 tvout_hda_start(tvout, sti_drm_crtc_is_main(encoder->crtc)); 526 tvout_hda_start(tvout, sti_crtc_is_main(encoder->crtc));
527} 527}
528 528
529static void sti_hda_encoder_disable(struct drm_encoder *encoder) 529static void sti_hda_encoder_disable(struct drm_encoder *encoder)
@@ -575,7 +575,7 @@ static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
575{ 575{
576 struct sti_tvout *tvout = to_sti_tvout(encoder); 576 struct sti_tvout *tvout = to_sti_tvout(encoder);
577 577
578 tvout_hdmi_start(tvout, sti_drm_crtc_is_main(encoder->crtc)); 578 tvout_hdmi_start(tvout, sti_crtc_is_main(encoder->crtc));
579} 579}
580 580
581static void sti_hdmi_encoder_disable(struct drm_encoder *encoder) 581static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
@@ -644,7 +644,6 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
644 struct sti_tvout *tvout = dev_get_drvdata(dev); 644 struct sti_tvout *tvout = dev_get_drvdata(dev);
645 struct drm_device *drm_dev = data; 645 struct drm_device *drm_dev = data;
646 unsigned int i; 646 unsigned int i;
647 int ret;
648 647
649 tvout->drm_dev = drm_dev; 648 tvout->drm_dev = drm_dev;
650 649
@@ -658,17 +657,15 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
658 657
659 sti_tvout_create_encoders(drm_dev, tvout); 658 sti_tvout_create_encoders(drm_dev, tvout);
660 659
661 ret = component_bind_all(dev, drm_dev); 660 return 0;
662 if (ret)
663 sti_tvout_destroy_encoders(tvout);
664
665 return ret;
666} 661}
667 662
668static void sti_tvout_unbind(struct device *dev, struct device *master, 663static void sti_tvout_unbind(struct device *dev, struct device *master,
669 void *data) 664 void *data)
670{ 665{
671 /* do nothing */ 666 struct sti_tvout *tvout = dev_get_drvdata(dev);
667
668 sti_tvout_destroy_encoders(tvout);
672} 669}
673 670
674static const struct component_ops sti_tvout_ops = { 671static const struct component_ops sti_tvout_ops = {
@@ -676,34 +673,12 @@ static const struct component_ops sti_tvout_ops = {
676 .unbind = sti_tvout_unbind, 673 .unbind = sti_tvout_unbind,
677}; 674};
678 675
679static int compare_of(struct device *dev, void *data)
680{
681 return dev->of_node == data;
682}
683
684static int sti_tvout_master_bind(struct device *dev)
685{
686 return 0;
687}
688
689static void sti_tvout_master_unbind(struct device *dev)
690{
691 /* do nothing */
692}
693
694static const struct component_master_ops sti_tvout_master_ops = {
695 .bind = sti_tvout_master_bind,
696 .unbind = sti_tvout_master_unbind,
697};
698
699static int sti_tvout_probe(struct platform_device *pdev) 676static int sti_tvout_probe(struct platform_device *pdev)
700{ 677{
701 struct device *dev = &pdev->dev; 678 struct device *dev = &pdev->dev;
702 struct device_node *node = dev->of_node; 679 struct device_node *node = dev->of_node;
703 struct sti_tvout *tvout; 680 struct sti_tvout *tvout;
704 struct resource *res; 681 struct resource *res;
705 struct device_node *child_np;
706 struct component_match *match = NULL;
707 682
708 DRM_INFO("%s\n", __func__); 683 DRM_INFO("%s\n", __func__);
709 684
@@ -734,24 +709,11 @@ static int sti_tvout_probe(struct platform_device *pdev)
734 709
735 platform_set_drvdata(pdev, tvout); 710 platform_set_drvdata(pdev, tvout);
736 711
737 of_platform_populate(node, NULL, NULL, dev);
738
739 child_np = of_get_next_available_child(node, NULL);
740
741 while (child_np) {
742 component_match_add(dev, &match, compare_of, child_np);
743 of_node_put(child_np);
744 child_np = of_get_next_available_child(node, child_np);
745 }
746
747 component_master_add_with_match(dev, &sti_tvout_master_ops, match);
748
749 return component_add(dev, &sti_tvout_ops); 712 return component_add(dev, &sti_tvout_ops);
750} 713}
751 714
752static int sti_tvout_remove(struct platform_device *pdev) 715static int sti_tvout_remove(struct platform_device *pdev)
753{ 716{
754 component_master_del(&pdev->dev, &sti_tvout_master_ops);
755 component_del(&pdev->dev, &sti_tvout_ops); 717 component_del(&pdev->dev, &sti_tvout_ops);
756 return 0; 718 return 0;
757} 719}
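
The tvout change above drops the extra component-master layer and makes unbind tear down what bind created, instead of leaving a do-nothing unbind. A minimal standalone sketch of that symmetry (stand-in ops, not the real component API):

#include <stdio.h>

struct component_ops {
	int (*bind)(void *dev);
	void (*unbind)(void *dev);
};

static int tvout_bind(void *dev)
{
	printf("create encoders\n");
	return 0;
}

static void tvout_unbind(void *dev)
{
	printf("destroy encoders\n");	/* undo exactly what bind did */
}

static const struct component_ops tvout_ops = {
	.bind = tvout_bind,
	.unbind = tvout_unbind,
};

int main(void)
{
	if (!tvout_ops.bind(NULL))
		tvout_ops.unbind(NULL);
	return 0;
}
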
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 10ced6a479f4..a8254cc362a1 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -6,7 +6,7 @@
6 6
7#include <drm/drmP.h> 7#include <drm/drmP.h>
8 8
9#include "sti_layer.h" 9#include "sti_plane.h"
10#include "sti_vid.h" 10#include "sti_vid.h"
11#include "sti_vtg.h" 11#include "sti_vtg.h"
12 12
@@ -43,35 +43,37 @@
43#define VID_MPR2_BT709 0x07150545 43#define VID_MPR2_BT709 0x07150545
44#define VID_MPR3_BT709 0x00000AE8 44#define VID_MPR3_BT709 0x00000AE8
45 45
46static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare) 46void sti_vid_commit(struct sti_vid *vid,
47 struct drm_plane_state *state)
47{ 48{
48 u32 val; 49 struct drm_crtc *crtc = state->crtc;
50 struct drm_display_mode *mode = &crtc->mode;
51 int dst_x = state->crtc_x;
52 int dst_y = state->crtc_y;
53 int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
54 int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
55 u32 val, ydo, xdo, yds, xds;
56
57 /* Input / output size
 58 * Align up to an even value */
59 dst_w = ALIGN(dst_w, 2);
60 dst_h = ALIGN(dst_h, 2);
49 61
50 /* Unmask */ 62 /* Unmask */
51 val = readl(vid->regs + VID_CTL); 63 val = readl(vid->regs + VID_CTL);
52 val &= ~VID_CTL_IGNORE; 64 val &= ~VID_CTL_IGNORE;
53 writel(val, vid->regs + VID_CTL); 65 writel(val, vid->regs + VID_CTL);
54 66
55 return 0; 67 ydo = sti_vtg_get_line_number(*mode, dst_y);
56} 68 yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
57 69 xdo = sti_vtg_get_pixel_number(*mode, dst_x);
58static int sti_vid_commit_layer(struct sti_layer *vid) 70 xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
59{
60 struct drm_display_mode *mode = vid->mode;
61 u32 ydo, xdo, yds, xds;
62
63 ydo = sti_vtg_get_line_number(*mode, vid->dst_y);
64 yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1);
65 xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x);
66 xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1);
67 71
68 writel((ydo << 16) | xdo, vid->regs + VID_VPO); 72 writel((ydo << 16) | xdo, vid->regs + VID_VPO);
69 writel((yds << 16) | xds, vid->regs + VID_VPS); 73 writel((yds << 16) | xds, vid->regs + VID_VPS);
70
71 return 0;
72} 74}
73 75
74static int sti_vid_disable_layer(struct sti_layer *vid) 76void sti_vid_disable(struct sti_vid *vid)
75{ 77{
76 u32 val; 78 u32 val;
77 79
@@ -79,21 +81,9 @@ static int sti_vid_disable_layer(struct sti_layer *vid)
79 val = readl(vid->regs + VID_CTL); 81 val = readl(vid->regs + VID_CTL);
80 val |= VID_CTL_IGNORE; 82 val |= VID_CTL_IGNORE;
81 writel(val, vid->regs + VID_CTL); 83 writel(val, vid->regs + VID_CTL);
82
83 return 0;
84} 84}
85 85
86static const uint32_t *sti_vid_get_formats(struct sti_layer *layer) 86static void sti_vid_init(struct sti_vid *vid)
87{
88 return NULL;
89}
90
91static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer)
92{
93 return 0;
94}
95
96static void sti_vid_init(struct sti_layer *vid)
97{ 87{
98 /* Enable PSI, Mask layer */ 88 /* Enable PSI, Mask layer */
99 writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL); 89 writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL);
@@ -113,18 +103,10 @@ static void sti_vid_init(struct sti_layer *vid)
113 writel(VID_CSAT_DFLT, vid->regs + VID_CSAT); 103 writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
114} 104}
115 105
116static const struct sti_layer_funcs vid_ops = { 106struct sti_vid *sti_vid_create(struct device *dev, int id,
117 .get_formats = sti_vid_get_formats, 107 void __iomem *baseaddr)
118 .get_nb_formats = sti_vid_get_nb_formats,
119 .init = sti_vid_init,
120 .prepare = sti_vid_prepare_layer,
121 .commit = sti_vid_commit_layer,
122 .disable = sti_vid_disable_layer,
123};
124
125struct sti_layer *sti_vid_create(struct device *dev)
126{ 108{
127 struct sti_layer *vid; 109 struct sti_vid *vid;
128 110
129 vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL); 111 vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL);
130 if (!vid) { 112 if (!vid) {
@@ -132,7 +114,11 @@ struct sti_layer *sti_vid_create(struct device *dev)
132 return NULL; 114 return NULL;
133 } 115 }
134 116
135 vid->ops = &vid_ops; 117 vid->dev = dev;
118 vid->regs = baseaddr;
119 vid->id = id;
120
121 sti_vid_init(vid);
136 122
137 return vid; 123 return vid;
138} 124}
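
sti_vid_commit() above clamps the destination rectangle to the active display area and then rounds the size up to an even value. A standalone sketch of that computation, with clamp_val/ALIGN open-coded and illustrative sample numbers:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static int clamp_val(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	int hdisplay = 1920, vdisplay = 1080;	/* display mode */
	int dst_x = 100, dst_y = 50;		/* plane state */
	int crtc_w = 1900, crtc_h = 1029;

	/* clamp to what fits on screen, then align to the next even size */
	int dst_w = ALIGN_UP(clamp_val(crtc_w, 0, hdisplay - dst_x), 2);
	int dst_h = ALIGN_UP(clamp_val(crtc_h, 0, vdisplay - dst_y), 2);

	printf("dst = %dx%d@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
	return 0;
}
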
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 2c0aecd63294..5dea4791f1d6 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -7,6 +7,23 @@
7#ifndef _STI_VID_H_ 7#ifndef _STI_VID_H_
8#define _STI_VID_H_ 8#define _STI_VID_H_
9 9
10struct sti_layer *sti_vid_create(struct device *dev); 10/**
11 * STI VID structure
12 *
13 * @dev: driver device
14 * @regs: vid registers
15 * @id: id of the vid
16 */
17struct sti_vid {
18 struct device *dev;
19 void __iomem *regs;
20 int id;
21};
22
23void sti_vid_commit(struct sti_vid *vid,
24 struct drm_plane_state *state);
25void sti_vid_disable(struct sti_vid *vid);
26struct sti_vid *sti_vid_create(struct device *dev, int id,
27 void __iomem *baseaddr);
11 28
12#endif 29#endif
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index bf8ef3133e5b..ddefb85dc4f7 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -76,6 +76,14 @@ to_tegra_plane_state(struct drm_plane_state *state)
76 return NULL; 76 return NULL;
77} 77}
78 78
79static void tegra_dc_stats_reset(struct tegra_dc_stats *stats)
80{
81 stats->frames = 0;
82 stats->vblank = 0;
83 stats->underflow = 0;
84 stats->overflow = 0;
85}
86
79/* 87/*
80 * Reads the active copy of a register. This takes the dc->lock spinlock to 88 * Reads the active copy of a register. This takes the dc->lock spinlock to
81 * prevent races with the VBLANK processing which also needs access to the 89 * prevent races with the VBLANK processing which also needs access to the
@@ -759,7 +767,6 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
759 /* position the cursor */ 767 /* position the cursor */
760 value = (state->crtc_y & 0x3fff) << 16 | (state->crtc_x & 0x3fff); 768 value = (state->crtc_y & 0x3fff) << 16 | (state->crtc_x & 0x3fff);
761 tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION); 769 tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
762
763} 770}
764 771
765static void tegra_cursor_atomic_disable(struct drm_plane *plane, 772static void tegra_cursor_atomic_disable(struct drm_plane *plane,
@@ -809,9 +816,11 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
809 return ERR_PTR(-ENOMEM); 816 return ERR_PTR(-ENOMEM);
810 817
811 /* 818 /*
812 * We'll treat the cursor as an overlay plane with index 6 here so 819 * This index is kind of fake. The cursor isn't a regular plane, but
813 * that the update and activation request bits in DC_CMD_STATE_CONTROL 820 * its update and activation request bits in DC_CMD_STATE_CONTROL do
814 * match up. 821 * use the same programming. Setting this fake index here allows the
822 * code in tegra_add_plane_state() to do the right thing without the
 823 * need to special-case the cursor plane.
815 */ 824 */
816 plane->index = 6; 825 plane->index = 6;
817 826
@@ -1015,6 +1024,8 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
1015 crtc->state = &state->base; 1024 crtc->state = &state->base;
1016 crtc->state->crtc = crtc; 1025 crtc->state->crtc = crtc;
1017 } 1026 }
1027
1028 drm_crtc_vblank_reset(crtc);
1018} 1029}
1019 1030
1020static struct drm_crtc_state * 1031static struct drm_crtc_state *
@@ -1052,90 +1063,6 @@ static const struct drm_crtc_funcs tegra_crtc_funcs = {
1052 .atomic_destroy_state = tegra_crtc_atomic_destroy_state, 1063 .atomic_destroy_state = tegra_crtc_atomic_destroy_state,
1053}; 1064};
1054 1065
1055static void tegra_dc_stop(struct tegra_dc *dc)
1056{
1057 u32 value;
1058
1059 /* stop the display controller */
1060 value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
1061 value &= ~DISP_CTRL_MODE_MASK;
1062 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
1063
1064 tegra_dc_commit(dc);
1065}
1066
1067static bool tegra_dc_idle(struct tegra_dc *dc)
1068{
1069 u32 value;
1070
1071 value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND);
1072
1073 return (value & DISP_CTRL_MODE_MASK) == 0;
1074}
1075
1076static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
1077{
1078 timeout = jiffies + msecs_to_jiffies(timeout);
1079
1080 while (time_before(jiffies, timeout)) {
1081 if (tegra_dc_idle(dc))
1082 return 0;
1083
1084 usleep_range(1000, 2000);
1085 }
1086
1087 dev_dbg(dc->dev, "timeout waiting for DC to become idle\n");
1088 return -ETIMEDOUT;
1089}
1090
1091static void tegra_crtc_disable(struct drm_crtc *crtc)
1092{
1093 struct tegra_dc *dc = to_tegra_dc(crtc);
1094 u32 value;
1095
1096 if (!tegra_dc_idle(dc)) {
1097 tegra_dc_stop(dc);
1098
1099 /*
1100 * Ignore the return value, there isn't anything useful to do
1101 * in case this fails.
1102 */
1103 tegra_dc_wait_idle(dc, 100);
1104 }
1105
1106 /*
1107 * This should really be part of the RGB encoder driver, but clearing
1108 * these bits has the side-effect of stopping the display controller.
1109 * When that happens no VBLANK interrupts will be raised. At the same
1110 * time the encoder is disabled before the display controller, so the
 1111 * above code is always going to time out waiting for the controller
1112 * to go idle.
1113 *
1114 * Given the close coupling between the RGB encoder and the display
1115 * controller doing it here is still kind of okay. None of the other
1116 * encoder drivers require these bits to be cleared.
1117 *
1118 * XXX: Perhaps given that the display controller is switched off at
1119 * this point anyway maybe clearing these bits isn't even useful for
1120 * the RGB encoder?
1121 */
1122 if (dc->rgb) {
1123 value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
1124 value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
1125 PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
1126 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
1127 }
1128
1129 drm_crtc_vblank_off(crtc);
1130}
1131
1132static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
1133 const struct drm_display_mode *mode,
1134 struct drm_display_mode *adjusted)
1135{
1136 return true;
1137}
1138
1139static int tegra_dc_set_timings(struct tegra_dc *dc, 1066static int tegra_dc_set_timings(struct tegra_dc *dc,
1140 struct drm_display_mode *mode) 1067 struct drm_display_mode *mode)
1141{ 1068{
@@ -1229,7 +1156,85 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
1229 tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); 1156 tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
1230} 1157}
1231 1158
1232static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc) 1159static void tegra_dc_stop(struct tegra_dc *dc)
1160{
1161 u32 value;
1162
1163 /* stop the display controller */
1164 value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
1165 value &= ~DISP_CTRL_MODE_MASK;
1166 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
1167
1168 tegra_dc_commit(dc);
1169}
1170
1171static bool tegra_dc_idle(struct tegra_dc *dc)
1172{
1173 u32 value;
1174
1175 value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND);
1176
1177 return (value & DISP_CTRL_MODE_MASK) == 0;
1178}
1179
1180static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
1181{
1182 timeout = jiffies + msecs_to_jiffies(timeout);
1183
1184 while (time_before(jiffies, timeout)) {
1185 if (tegra_dc_idle(dc))
1186 return 0;
1187
1188 usleep_range(1000, 2000);
1189 }
1190
1191 dev_dbg(dc->dev, "timeout waiting for DC to become idle\n");
1192 return -ETIMEDOUT;
1193}
1194
1195static void tegra_crtc_disable(struct drm_crtc *crtc)
1196{
1197 struct tegra_dc *dc = to_tegra_dc(crtc);
1198 u32 value;
1199
1200 if (!tegra_dc_idle(dc)) {
1201 tegra_dc_stop(dc);
1202
1203 /*
1204 * Ignore the return value, there isn't anything useful to do
1205 * in case this fails.
1206 */
1207 tegra_dc_wait_idle(dc, 100);
1208 }
1209
1210 /*
1211 * This should really be part of the RGB encoder driver, but clearing
1212 * these bits has the side-effect of stopping the display controller.
1213 * When that happens no VBLANK interrupts will be raised. At the same
1214 * time the encoder is disabled before the display controller, so the
 1215 * above code is always going to time out waiting for the controller
1216 * to go idle.
1217 *
1218 * Given the close coupling between the RGB encoder and the display
1219 * controller doing it here is still kind of okay. None of the other
1220 * encoder drivers require these bits to be cleared.
1221 *
1222 * XXX: Perhaps given that the display controller is switched off at
1223 * this point anyway maybe clearing these bits isn't even useful for
1224 * the RGB encoder?
1225 */
1226 if (dc->rgb) {
1227 value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
1228 value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
1229 PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
1230 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
1231 }
1232
1233 tegra_dc_stats_reset(&dc->stats);
1234 drm_crtc_vblank_off(crtc);
1235}
1236
1237static void tegra_crtc_enable(struct drm_crtc *crtc)
1233{ 1238{
1234 struct drm_display_mode *mode = &crtc->state->adjusted_mode; 1239 struct drm_display_mode *mode = &crtc->state->adjusted_mode;
1235 struct tegra_dc_state *state = to_dc_state(crtc->state); 1240 struct tegra_dc_state *state = to_dc_state(crtc->state);
@@ -1259,15 +1264,7 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
1259 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); 1264 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
1260 1265
1261 tegra_dc_commit(dc); 1266 tegra_dc_commit(dc);
1262}
1263
1264static void tegra_crtc_prepare(struct drm_crtc *crtc)
1265{
1266 drm_crtc_vblank_off(crtc);
1267}
1268 1267
1269static void tegra_crtc_commit(struct drm_crtc *crtc)
1270{
1271 drm_crtc_vblank_on(crtc); 1268 drm_crtc_vblank_on(crtc);
1272} 1269}
1273 1270
@@ -1304,10 +1301,7 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
1304 1301
1305static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = { 1302static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
1306 .disable = tegra_crtc_disable, 1303 .disable = tegra_crtc_disable,
1307 .mode_fixup = tegra_crtc_mode_fixup, 1304 .enable = tegra_crtc_enable,
1308 .mode_set_nofb = tegra_crtc_mode_set_nofb,
1309 .prepare = tegra_crtc_prepare,
1310 .commit = tegra_crtc_commit,
1311 .atomic_check = tegra_crtc_atomic_check, 1305 .atomic_check = tegra_crtc_atomic_check,
1312 .atomic_begin = tegra_crtc_atomic_begin, 1306 .atomic_begin = tegra_crtc_atomic_begin,
1313 .atomic_flush = tegra_crtc_atomic_flush, 1307 .atomic_flush = tegra_crtc_atomic_flush,
@@ -1325,6 +1319,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
1325 /* 1319 /*
1326 dev_dbg(dc->dev, "%s(): frame end\n", __func__); 1320 dev_dbg(dc->dev, "%s(): frame end\n", __func__);
1327 */ 1321 */
1322 dc->stats.frames++;
1328 } 1323 }
1329 1324
1330 if (status & VBLANK_INT) { 1325 if (status & VBLANK_INT) {
@@ -1333,12 +1328,21 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
1333 */ 1328 */
1334 drm_crtc_handle_vblank(&dc->base); 1329 drm_crtc_handle_vblank(&dc->base);
1335 tegra_dc_finish_page_flip(dc); 1330 tegra_dc_finish_page_flip(dc);
1331 dc->stats.vblank++;
1336 } 1332 }
1337 1333
1338 if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) { 1334 if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
1339 /* 1335 /*
1340 dev_dbg(dc->dev, "%s(): underflow\n", __func__); 1336 dev_dbg(dc->dev, "%s(): underflow\n", __func__);
1341 */ 1337 */
1338 dc->stats.underflow++;
1339 }
1340
1341 if (status & (WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT)) {
1342 /*
1343 dev_dbg(dc->dev, "%s(): overflow\n", __func__);
1344 */
1345 dc->stats.overflow++;
1342 } 1346 }
1343 1347
1344 return IRQ_HANDLED; 1348 return IRQ_HANDLED;
@@ -1348,6 +1352,14 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
1348{ 1352{
1349 struct drm_info_node *node = s->private; 1353 struct drm_info_node *node = s->private;
1350 struct tegra_dc *dc = node->info_ent->data; 1354 struct tegra_dc *dc = node->info_ent->data;
1355 int err = 0;
1356
1357 drm_modeset_lock_crtc(&dc->base, NULL);
1358
1359 if (!dc->base.state->active) {
1360 err = -EBUSY;
1361 goto unlock;
1362 }
1351 1363
1352#define DUMP_REG(name) \ 1364#define DUMP_REG(name) \
1353 seq_printf(s, "%-40s %#05x %08x\n", #name, name, \ 1365 seq_printf(s, "%-40s %#05x %08x\n", #name, name, \
@@ -1568,11 +1580,59 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
1568 1580
1569#undef DUMP_REG 1581#undef DUMP_REG
1570 1582
1583unlock:
1584 drm_modeset_unlock_crtc(&dc->base);
1585 return err;
1586}
1587
1588static int tegra_dc_show_crc(struct seq_file *s, void *data)
1589{
1590 struct drm_info_node *node = s->private;
1591 struct tegra_dc *dc = node->info_ent->data;
1592 int err = 0;
1593 u32 value;
1594
1595 drm_modeset_lock_crtc(&dc->base, NULL);
1596
1597 if (!dc->base.state->active) {
1598 err = -EBUSY;
1599 goto unlock;
1600 }
1601
1602 value = DC_COM_CRC_CONTROL_ACTIVE_DATA | DC_COM_CRC_CONTROL_ENABLE;
1603 tegra_dc_writel(dc, value, DC_COM_CRC_CONTROL);
1604 tegra_dc_commit(dc);
1605
1606 drm_crtc_wait_one_vblank(&dc->base);
1607 drm_crtc_wait_one_vblank(&dc->base);
1608
1609 value = tegra_dc_readl(dc, DC_COM_CRC_CHECKSUM);
1610 seq_printf(s, "%08x\n", value);
1611
1612 tegra_dc_writel(dc, 0, DC_COM_CRC_CONTROL);
1613
1614unlock:
1615 drm_modeset_unlock_crtc(&dc->base);
1616 return err;
1617}
1618
1619static int tegra_dc_show_stats(struct seq_file *s, void *data)
1620{
1621 struct drm_info_node *node = s->private;
1622 struct tegra_dc *dc = node->info_ent->data;
1623
1624 seq_printf(s, "frames: %lu\n", dc->stats.frames);
1625 seq_printf(s, "vblank: %lu\n", dc->stats.vblank);
1626 seq_printf(s, "underflow: %lu\n", dc->stats.underflow);
1627 seq_printf(s, "overflow: %lu\n", dc->stats.overflow);
1628
1571 return 0; 1629 return 0;
1572} 1630}
1573 1631
1574static struct drm_info_list debugfs_files[] = { 1632static struct drm_info_list debugfs_files[] = {
1575 { "regs", tegra_dc_show_regs, 0, NULL }, 1633 { "regs", tegra_dc_show_regs, 0, NULL },
1634 { "crc", tegra_dc_show_crc, 0, NULL },
1635 { "stats", tegra_dc_show_stats, 0, NULL },
1576}; 1636};
1577 1637
1578static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor) 1638static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
@@ -1718,7 +1778,8 @@ static int tegra_dc_init(struct host1x_client *client)
1718 tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC); 1778 tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
1719 } 1779 }
1720 1780
1721 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT; 1781 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
1782 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
1722 tegra_dc_writel(dc, value, DC_CMD_INT_TYPE); 1783 tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
1723 1784
1724 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | 1785 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
@@ -1734,15 +1795,19 @@ static int tegra_dc_init(struct host1x_client *client)
1734 WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1); 1795 WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
1735 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); 1796 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
1736 1797
1737 value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; 1798 value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
1799 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
1738 tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); 1800 tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
1739 1801
1740 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; 1802 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
1803 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
1741 tegra_dc_writel(dc, value, DC_CMD_INT_MASK); 1804 tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
1742 1805
1743 if (dc->soc->supports_border_color) 1806 if (dc->soc->supports_border_color)
1744 tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); 1807 tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
1745 1808
1809 tegra_dc_stats_reset(&dc->stats);
1810
1746 return 0; 1811 return 0;
1747 1812
1748cleanup: 1813cleanup:
@@ -1828,8 +1893,20 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
1828 .has_powergate = true, 1893 .has_powergate = true,
1829}; 1894};
1830 1895
1896static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
1897 .supports_border_color = false,
1898 .supports_interlacing = true,
1899 .supports_cursor = true,
1900 .supports_block_linear = true,
1901 .pitch_align = 64,
1902 .has_powergate = true,
1903};
1904
1831static const struct of_device_id tegra_dc_of_match[] = { 1905static const struct of_device_id tegra_dc_of_match[] = {
1832 { 1906 {
1907 .compatible = "nvidia,tegra210-dc",
1908 .data = &tegra210_dc_soc_info,
1909 }, {
1833 .compatible = "nvidia,tegra124-dc", 1910 .compatible = "nvidia,tegra124-dc",
1834 .data = &tegra124_dc_soc_info, 1911 .data = &tegra124_dc_soc_info,
1835 }, { 1912 }, {
@@ -1959,6 +2036,10 @@ static int tegra_dc_probe(struct platform_device *pdev)
1959 return -ENXIO; 2036 return -ENXIO;
1960 } 2037 }
1961 2038
2039 dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
2040 if (!dc->syncpt)
2041 dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
2042
1962 INIT_LIST_HEAD(&dc->client.list); 2043 INIT_LIST_HEAD(&dc->client.list);
1963 dc->client.ops = &dc_client_ops; 2044 dc->client.ops = &dc_client_ops;
1964 dc->client.dev = &pdev->dev; 2045 dc->client.dev = &pdev->dev;
@@ -1976,10 +2057,6 @@ static int tegra_dc_probe(struct platform_device *pdev)
1976 return err; 2057 return err;
1977 } 2058 }
1978 2059
1979 dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
1980 if (!dc->syncpt)
1981 dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
1982
1983 platform_set_drvdata(pdev, dc); 2060 platform_set_drvdata(pdev, dc);
1984 2061
1985 return 0; 2062 return 0;
@@ -2018,7 +2095,6 @@ static int tegra_dc_remove(struct platform_device *pdev)
2018struct platform_driver tegra_dc_driver = { 2095struct platform_driver tegra_dc_driver = {
2019 .driver = { 2096 .driver = {
2020 .name = "tegra-dc", 2097 .name = "tegra-dc",
2021 .owner = THIS_MODULE,
2022 .of_match_table = tegra_dc_of_match, 2098 .of_match_table = tegra_dc_of_match,
2023 }, 2099 },
2024 .probe = tegra_dc_probe, 2100 .probe = tegra_dc_probe,
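
The Tegra DC hunks above add a per-controller stats structure that the IRQ handler increments and a new debugfs file dumps; both init and disable reset it. A standalone sketch of that bookkeeping, with the interrupt simulated:

#include <stdio.h>

struct dc_stats {
	unsigned long frames, vblank, underflow, overflow;
};

static void stats_reset(struct dc_stats *s)
{
	*s = (struct dc_stats){ 0 };
}

static void fake_irq(struct dc_stats *s, int vblank, int underflow)
{
	s->frames++;		/* frame-end interrupt */
	if (vblank)
		s->vblank++;
	if (underflow)
		s->underflow++;
}

int main(void)
{
	struct dc_stats stats;

	stats_reset(&stats);
	fake_irq(&stats, 1, 0);
	fake_irq(&stats, 1, 1);
	printf("frames: %lu\nvblank: %lu\nunderflow: %lu\noverflow: %lu\n",
	       stats.frames, stats.vblank, stats.underflow, stats.overflow);
	return 0;
}
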
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 55792daabbb5..4a268635749b 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -86,6 +86,11 @@
86#define DC_CMD_REG_ACT_CONTROL 0x043 86#define DC_CMD_REG_ACT_CONTROL 0x043
87 87
88#define DC_COM_CRC_CONTROL 0x300 88#define DC_COM_CRC_CONTROL 0x300
89#define DC_COM_CRC_CONTROL_ALWAYS (1 << 3)
90#define DC_COM_CRC_CONTROL_FULL_FRAME (0 << 2)
91#define DC_COM_CRC_CONTROL_ACTIVE_DATA (1 << 2)
92#define DC_COM_CRC_CONTROL_WAIT (1 << 1)
93#define DC_COM_CRC_CONTROL_ENABLE (1 << 0)
89#define DC_COM_CRC_CHECKSUM 0x301 94#define DC_COM_CRC_CHECKSUM 0x301
90#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x)) 95#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x))
91#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x)) 96#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x))
@@ -114,15 +119,17 @@
114#define DC_COM_CRC_CHECKSUM_LATCHED 0x329 119#define DC_COM_CRC_CHECKSUM_LATCHED 0x329
115 120
116#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400 121#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400
117#define H_PULSE_0_ENABLE (1 << 8) 122#define H_PULSE0_ENABLE (1 << 8)
118#define H_PULSE_1_ENABLE (1 << 10) 123#define H_PULSE1_ENABLE (1 << 10)
119#define H_PULSE_2_ENABLE (1 << 12) 124#define H_PULSE2_ENABLE (1 << 12)
120 125
121#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401 126#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401
122 127
123#define DC_DISP_DISP_WIN_OPTIONS 0x402 128#define DC_DISP_DISP_WIN_OPTIONS 0x402
124#define HDMI_ENABLE (1 << 30) 129#define HDMI_ENABLE (1 << 30)
125#define DSI_ENABLE (1 << 29) 130#define DSI_ENABLE (1 << 29)
131#define SOR1_TIMING_CYA (1 << 27)
132#define SOR1_ENABLE (1 << 26)
126#define SOR_ENABLE (1 << 25) 133#define SOR_ENABLE (1 << 25)
127#define CURSOR_ENABLE (1 << 16) 134#define CURSOR_ENABLE (1 << 16)
128 135
@@ -242,9 +249,20 @@
242#define BASE_COLOR_SIZE565 (6 << 0) 249#define BASE_COLOR_SIZE565 (6 << 0)
243#define BASE_COLOR_SIZE332 (7 << 0) 250#define BASE_COLOR_SIZE332 (7 << 0)
244#define BASE_COLOR_SIZE888 (8 << 0) 251#define BASE_COLOR_SIZE888 (8 << 0)
252#define DITHER_CONTROL_MASK (3 << 8)
245#define DITHER_CONTROL_DISABLE (0 << 8) 253#define DITHER_CONTROL_DISABLE (0 << 8)
246#define DITHER_CONTROL_ORDERED (2 << 8) 254#define DITHER_CONTROL_ORDERED (2 << 8)
247#define DITHER_CONTROL_ERRDIFF (3 << 8) 255#define DITHER_CONTROL_ERRDIFF (3 << 8)
256#define BASE_COLOR_SIZE_MASK (0xf << 0)
257#define BASE_COLOR_SIZE_666 (0 << 0)
258#define BASE_COLOR_SIZE_111 (1 << 0)
259#define BASE_COLOR_SIZE_222 (2 << 0)
260#define BASE_COLOR_SIZE_333 (3 << 0)
261#define BASE_COLOR_SIZE_444 (4 << 0)
262#define BASE_COLOR_SIZE_555 (5 << 0)
263#define BASE_COLOR_SIZE_565 (6 << 0)
264#define BASE_COLOR_SIZE_332 (7 << 0)
265#define BASE_COLOR_SIZE_888 (8 << 0)
248 266
249#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431 267#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431
250#define SC1_H_QUALIFIER_NONE (1 << 16) 268#define SC1_H_QUALIFIER_NONE (1 << 16)
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 07b26972f487..224a7dc8e4ed 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -294,26 +294,41 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
 	}
 
 	dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
-	if (IS_ERR(dpaux->rst))
+	if (IS_ERR(dpaux->rst)) {
+		dev_err(&pdev->dev, "failed to get reset control: %ld\n",
+			PTR_ERR(dpaux->rst));
 		return PTR_ERR(dpaux->rst);
+	}
 
 	dpaux->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(dpaux->clk))
+	if (IS_ERR(dpaux->clk)) {
+		dev_err(&pdev->dev, "failed to get module clock: %ld\n",
+			PTR_ERR(dpaux->clk));
 		return PTR_ERR(dpaux->clk);
+	}
 
 	err = clk_prepare_enable(dpaux->clk);
-	if (err < 0)
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to enable module clock: %d\n",
+			err);
 		return err;
+	}
 
 	reset_control_deassert(dpaux->rst);
 
 	dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
-	if (IS_ERR(dpaux->clk_parent))
+	if (IS_ERR(dpaux->clk_parent)) {
+		dev_err(&pdev->dev, "failed to get parent clock: %ld\n",
+			PTR_ERR(dpaux->clk_parent));
 		return PTR_ERR(dpaux->clk_parent);
+	}
 
 	err = clk_prepare_enable(dpaux->clk_parent);
-	if (err < 0)
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to enable parent clock: %d\n",
+			err);
 		return err;
+	}
 
 	err = clk_set_rate(dpaux->clk_parent, 270000000);
 	if (err < 0) {
@@ -323,8 +338,11 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
 	}
 
 	dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
-	if (IS_ERR(dpaux->vdd))
+	if (IS_ERR(dpaux->vdd)) {
+		dev_err(&pdev->dev, "failed to get VDD supply: %ld\n",
+			PTR_ERR(dpaux->vdd));
 		return PTR_ERR(dpaux->vdd);
+	}
 
 	err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
 			       dev_name(dpaux->dev), dpaux);
@@ -334,6 +352,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
 		return err;
 	}
 
+	disable_irq(dpaux->irq);
+
 	dpaux->aux.transfer = tegra_dpaux_transfer;
 	dpaux->aux.dev = &pdev->dev;
 
@@ -341,6 +361,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
 	if (err < 0)
 		return err;
 
+	/*
+	 * Assume that by default the DPAUX/I2C pads will be used for HDMI,
+	 * so power them up and configure them in I2C mode.
+	 *
+	 * The DPAUX code paths reconfigure the pads in AUX mode, but there
+	 * is no possibility to perform the I2C mode configuration in the
+	 * HDMI path.
+	 */
+	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+	value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+
+	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_PADCTL);
+	value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
+		DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+		DPAUX_HYBRID_PADCTL_MODE_I2C;
+	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+
 	/* enable and clear all interrupts */
 	value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
 		DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
@@ -359,6 +397,12 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
 static int tegra_dpaux_remove(struct platform_device *pdev)
 {
 	struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
+	u32 value;
+
+	/* make sure pads are powered down when not in use */
+	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+	value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
 
 	drm_dp_aux_unregister(&dpaux->aux);
 
@@ -376,6 +420,7 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id tegra_dpaux_of_match[] = {
+	{ .compatible = "nvidia,tegra210-dpaux", },
 	{ .compatible = "nvidia,tegra124-dpaux", },
 	{ },
 };
@@ -425,8 +470,10 @@ int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output)
 		enum drm_connector_status status;
 
 		status = tegra_dpaux_detect(dpaux);
-		if (status == connector_status_connected)
+		if (status == connector_status_connected) {
+			enable_irq(dpaux->irq);
 			return 0;
+		}
 
 		usleep_range(1000, 2000);
 	}
@@ -439,6 +486,8 @@ int tegra_dpaux_detach(struct tegra_dpaux *dpaux)
 	unsigned long timeout;
 	int err;
 
+	disable_irq(dpaux->irq);
+
 	err = regulator_disable(dpaux->vdd);
 	if (err < 0)
 		return err;
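Both the probe path above (powering the pads up and selecting I2C mode) and the new remove path (powering them back down) use the kernel's usual read-modify-write register idiom. A minimal standalone sketch of that idiom, with mock register offsets and bit definitions standing in for the real DPAUX layout:

#include <stdio.h>

#define HYBRID_SPARE		0
#define HYBRID_PADCTL		1
#define SPARE_PAD_POWER_DOWN	(1u << 0)	/* mock bit, not the real layout */
#define PADCTL_MODE_I2C		(1u << 0)	/* mock bit, not the real layout */

static unsigned int regs[2];

static unsigned int mock_readl(unsigned int offset)
{
	return regs[offset];
}

static void mock_writel(unsigned int value, unsigned int offset)
{
	regs[offset] = value;
}

int main(void)
{
	unsigned int value;

	regs[HYBRID_SPARE] = SPARE_PAD_POWER_DOWN;	/* pads start powered down */

	/* power the pads up: clear one bit, leave the rest untouched */
	value = mock_readl(HYBRID_SPARE);
	value &= ~SPARE_PAD_POWER_DOWN;
	mock_writel(value, HYBRID_SPARE);

	/* select I2C mode */
	value = mock_readl(HYBRID_PADCTL);
	value |= PADCTL_MODE_I2C;
	mock_writel(value, HYBRID_PADCTL);

	printf("spare=%08x padctl=%08x\n", regs[HYBRID_SPARE], regs[HYBRID_PADCTL]);
	return 0;
}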
diff --git a/drivers/gpu/drm/tegra/dpaux.h b/drivers/gpu/drm/tegra/dpaux.h
index 806e245ca787..20783d9f4728 100644
--- a/drivers/gpu/drm/tegra/dpaux.h
+++ b/drivers/gpu/drm/tegra/dpaux.h
@@ -57,6 +57,8 @@
 #define DPAUX_DP_AUX_CONFIG 0x45
 
 #define DPAUX_HYBRID_PADCTL 0x49
+#define DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV (1 << 15)
+#define DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV (1 << 14)
 #define DPAUX_HYBRID_PADCTL_AUX_CMH(x) (((x) & 0x3) << 12)
 #define DPAUX_HYBRID_PADCTL_AUX_DRVZ(x) (((x) & 0x7) << 8)
 #define DPAUX_HYBRID_PADCTL_AUX_DRVI(x) (((x) & 0x3f) << 2)
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 427f50c6803c..6d88cf1fcd1c 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -171,8 +171,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 	if (err < 0)
 		goto fbdev;
 
-	drm_mode_config_reset(drm);
-
 	/*
 	 * We don't use the drm_irq_install() helpers provided by the DRM
 	 * core, so we need to set this manually in order to allow the
@@ -182,11 +180,14 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 
 	/* syncpoints are used for full 32-bit hardware VBLANK counters */
 	drm->max_vblank_count = 0xffffffff;
+	drm->vblank_disable_allowed = true;
 
 	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
 	if (err < 0)
 		goto device;
 
+	drm_mode_config_reset(drm);
+
 	err = tegra_drm_fb_init(drm);
 	if (err < 0)
 		goto vblank;
@@ -1037,9 +1038,8 @@ static int host1x_drm_resume(struct device *dev)
 }
 #endif
 
-static const struct dev_pm_ops host1x_drm_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(host1x_drm_suspend, host1x_drm_resume)
-};
+static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
+			 host1x_drm_resume);
 
 static const struct of_device_id host1x_drm_subdevs[] = {
 	{ .compatible = "nvidia,tegra20-dc", },
@@ -1056,6 +1056,12 @@ static const struct of_device_id host1x_drm_subdevs[] = {
 	{ .compatible = "nvidia,tegra124-dc", },
 	{ .compatible = "nvidia,tegra124-sor", },
 	{ .compatible = "nvidia,tegra124-hdmi", },
+	{ .compatible = "nvidia,tegra124-dsi", },
+	{ .compatible = "nvidia,tegra132-dsi", },
+	{ .compatible = "nvidia,tegra210-dc", },
+	{ .compatible = "nvidia,tegra210-dsi", },
+	{ .compatible = "nvidia,tegra210-sor", },
+	{ .compatible = "nvidia,tegra210-sor1", },
 	{ /* sentinel */ }
 };
 
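The SIMPLE_DEV_PM_OPS() conversion above is behavior-neutral: as of this kernel era, the macro in include/linux/pm.h expands to essentially the open-coded structure it replaces, so only boilerplate changes. Lightly condensed for illustration:

/* include/linux/pm.h (condensed): defines the same const
 * struct dev_pm_ops that the driver previously spelled out by hand. */
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
const struct dev_pm_ops name = { \
	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}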
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 659b2fcc986d..ec49275ffb24 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -12,6 +12,7 @@
 
 #include <uapi/drm/tegra_drm.h>
 #include <linux/host1x.h>
+#include <linux/of_gpio.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
@@ -104,6 +105,13 @@ int tegra_drm_exit(struct tegra_drm *tegra);
 struct tegra_dc_soc_info;
 struct tegra_output;
 
+struct tegra_dc_stats {
+	unsigned long frames;
+	unsigned long vblank;
+	unsigned long underflow;
+	unsigned long overflow;
+};
+
 struct tegra_dc {
 	struct host1x_client client;
 	struct host1x_syncpt *syncpt;
@@ -121,6 +129,7 @@ struct tegra_dc {
 
 	struct tegra_output *rgb;
 
+	struct tegra_dc_stats stats;
 	struct list_head list;
 
 	struct drm_info_list *debugfs_files;
@@ -200,6 +209,7 @@ struct tegra_output {
 	const struct edid *edid;
 	unsigned int hpd_irq;
 	int hpd_gpio;
+	enum of_gpio_flags hpd_gpio_flags;
 
 	struct drm_encoder encoder;
 	struct drm_connector connector;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index dc97c0b3681d..f0a138ef68ce 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -119,6 +119,16 @@ static int tegra_dsi_show_regs(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct tegra_dsi *dsi = node->info_ent->data;
+	struct drm_crtc *crtc = dsi->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
+
+	drm_modeset_lock_all(drm);
+
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
 
 #define DUMP_REG(name) \
 	seq_printf(s, "%-32s %#05x %08x\n", #name, name, \
@@ -208,7 +218,9 @@ static int tegra_dsi_show_regs(struct seq_file *s, void *data)
 
 #undef DUMP_REG
 
-	return 0;
+unlock:
+	drm_modeset_unlock_all(drm);
+	return err;
 }
 
 static struct drm_info_list debugfs_files[] = {
@@ -548,14 +560,19 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
 
 	/* horizontal sync width */
 	hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
-	hsw -= 10;
 
 	/* horizontal back porch */
 	hbp = (mode->htotal - mode->hsync_end) * mul / div;
-	hbp -= 14;
+
+	if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0)
+		hbp += hsw;
 
 	/* horizontal front porch */
 	hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+
+	/* subtract packet overhead */
+	hsw -= 10;
+	hbp -= 14;
 	hfp -= 8;
 
 	tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
@@ -726,11 +743,6 @@ static void tegra_dsi_soft_reset(struct tegra_dsi *dsi)
 		tegra_dsi_soft_reset(dsi->slave);
 }
 
-static int tegra_dsi_connector_dpms(struct drm_connector *connector, int mode)
-{
-	return 0;
-}
-
 static void tegra_dsi_connector_reset(struct drm_connector *connector)
 {
 	struct tegra_dsi_state *state;
@@ -757,7 +769,7 @@ tegra_dsi_connector_duplicate_state(struct drm_connector *connector)
 }
 
 static const struct drm_connector_funcs tegra_dsi_connector_funcs = {
-	.dpms = tegra_dsi_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.reset = tegra_dsi_connector_reset,
 	.detect = tegra_output_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -783,22 +795,48 @@ static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
 	.destroy = tegra_output_encoder_destroy,
 };
 
-static void tegra_dsi_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
 {
-}
+	struct tegra_output *output = encoder_to_output(encoder);
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	struct tegra_dsi *dsi = to_dsi(output);
+	u32 value;
+	int err;
 
-static void tegra_dsi_encoder_prepare(struct drm_encoder *encoder)
-{
-}
+	if (output->panel)
+		drm_panel_disable(output->panel);
 
-static void tegra_dsi_encoder_commit(struct drm_encoder *encoder)
-{
+	tegra_dsi_video_disable(dsi);
+
+	/*
+	 * The following accesses registers of the display controller, so make
+	 * sure it's only executed when the output is attached to one.
+	 */
+	if (dc) {
+		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+		value &= ~DSI_ENABLE;
+		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+		tegra_dc_commit(dc);
+	}
+
+	err = tegra_dsi_wait_idle(dsi, 100);
+	if (err < 0)
+		dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err);
+
+	tegra_dsi_soft_reset(dsi);
+
+	if (output->panel)
+		drm_panel_unprepare(output->panel);
+
+	tegra_dsi_disable(dsi);
+
+	return;
 }
 
-static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder,
-				       struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted)
+static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
 {
+	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
 	struct tegra_output *output = encoder_to_output(encoder);
 	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
 	struct tegra_dsi *dsi = to_dsi(output);
@@ -836,45 +874,6 @@ static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder,
 	return;
 }
 
-static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
-{
-	struct tegra_output *output = encoder_to_output(encoder);
-	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-	struct tegra_dsi *dsi = to_dsi(output);
-	u32 value;
-	int err;
-
-	if (output->panel)
-		drm_panel_disable(output->panel);
-
-	tegra_dsi_video_disable(dsi);
-
-	/*
-	 * The following accesses registers of the display controller, so make
-	 * sure it's only executed when the output is attached to one.
-	 */
-	if (dc) {
-		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
-		value &= ~DSI_ENABLE;
-		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
-		tegra_dc_commit(dc);
-	}
-
-	err = tegra_dsi_wait_idle(dsi, 100);
-	if (err < 0)
-		dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err);
-
-	tegra_dsi_soft_reset(dsi);
-
-	if (output->panel)
-		drm_panel_unprepare(output->panel);
-
-	tegra_dsi_disable(dsi);
-
-	return;
-}
-
 static int
 tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder,
 			       struct drm_crtc_state *crtc_state,
@@ -957,11 +956,8 @@ tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder,
 }
 
 static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
-	.dpms = tegra_dsi_encoder_dpms,
-	.prepare = tegra_dsi_encoder_prepare,
-	.commit = tegra_dsi_encoder_commit,
-	.mode_set = tegra_dsi_encoder_mode_set,
 	.disable = tegra_dsi_encoder_disable,
+	.enable = tegra_dsi_encoder_enable,
 	.atomic_check = tegra_dsi_encoder_atomic_check,
 };
 
@@ -993,6 +989,10 @@ static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
 		DSI_PAD_OUT_CLK(0x0);
 	tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
 
+	value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
+		DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
+	tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
+
	return tegra_mipi_calibrate(dsi->mipi);
 }
 
@@ -1622,6 +1622,9 @@ static int tegra_dsi_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id tegra_dsi_of_match[] = {
+	{ .compatible = "nvidia,tegra210-dsi", },
+	{ .compatible = "nvidia,tegra132-dsi", },
+	{ .compatible = "nvidia,tegra124-dsi", },
 	{ .compatible = "nvidia,tegra114-dsi", },
 	{ },
 };
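The tegra_dsi_configure() hunk above reorders the horizontal timing math so that, when MIPI_DSI_MODE_VIDEO_SYNC_PULSE is not set, the sync width is folded into the back porch before the fixed packet overhead is subtracted. A standalone sketch of that computation with made-up mode numbers; mul/div here model the bytes-per-pixel ratio, which is an assumption for illustration:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* illustrative horizontal timings, in pixels */
	unsigned int hdisplay = 1920, hsync_start = 2008;
	unsigned int hsync_end = 2052, htotal = 2200;
	unsigned int mul = 3, div = 1;	/* e.g. 24 bpp -> 3 bytes per pixel */
	bool sync_pulse = false;	/* MIPI_DSI_MODE_VIDEO_SYNC_PULSE unset */

	unsigned int hsw = (hsync_end - hsync_start) * mul / div;
	unsigned int hbp = (htotal - hsync_end) * mul / div;
	unsigned int hfp = (hsync_start - hdisplay) * mul / div;

	if (!sync_pulse)
		hbp += hsw;	/* sync is transmitted as part of the back porch */

	/* subtract the fixed packet overhead, as in the patch */
	hsw -= 10;
	hbp -= 14;
	hfp -= 8;

	printf("hsw=%u hbp=%u hfp=%u (bytes)\n", hsw, hbp, hfp);
	return 0;
}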
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
index bad1006a5150..219263615399 100644
--- a/drivers/gpu/drm/tegra/dsi.h
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -113,6 +113,10 @@
 #define DSI_PAD_SLEW_DN(x) (((x) & 0x7) << 12)
 #define DSI_PAD_SLEW_UP(x) (((x) & 0x7) << 16)
 #define DSI_PAD_CONTROL_3 0x51
+#define DSI_PAD_PREEMP_PD_CLK(x) (((x) & 0x3) << 12)
+#define DSI_PAD_PREEMP_PU_CLK(x) (((x) & 0x3) << 8)
+#define DSI_PAD_PREEMP_PD(x) (((x) & 0x3) << 4)
+#define DSI_PAD_PREEMP_PU(x) (((x) & 0x3) << 0)
 #define DSI_PAD_CONTROL_4 0x52
 #define DSI_GANGED_MODE_CONTROL 0x53
 #define DSI_GANGED_MODE_CONTROL_ENABLE (1 << 0)
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 397fb34d5d5b..07c844b746b4 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -184,9 +184,9 @@ unreference:
 #ifdef CONFIG_DRM_TEGRA_FBDEV
 static struct fb_ops tegra_fb_ops = {
 	.owner = THIS_MODULE,
-	.fb_fillrect = sys_fillrect,
-	.fb_copyarea = sys_copyarea,
-	.fb_imageblit = sys_imageblit,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_blank = drm_fb_helper_blank,
@@ -224,11 +224,11 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
-	info = framebuffer_alloc(0, drm->dev);
-	if (!info) {
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
 		dev_err(drm->dev, "failed to allocate framebuffer info\n");
 		drm_gem_object_unreference_unlocked(&bo->gem);
-		return -ENOMEM;
+		return PTR_ERR(info);
 	}
 
 	fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
@@ -248,12 +248,6 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
 	info->flags = FBINFO_FLAG_DEFAULT;
 	info->fbops = &tegra_fb_ops;
 
-	err = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (err < 0) {
-		dev_err(drm->dev, "failed to allocate color map: %d\n", err);
-		goto destroy;
-	}
-
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
 
@@ -282,7 +276,7 @@ destroy:
 	drm_framebuffer_unregister_private(fb);
 	tegra_fb_destroy(fb);
 release:
-	framebuffer_release(info);
+	drm_fb_helper_release_fbi(helper);
 	return err;
 }
 
@@ -347,20 +341,9 @@ fini:
 
 static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
 {
-	struct fb_info *info = fbdev->base.fbdev;
-
-	if (info) {
-		int err;
 
-		err = unregister_framebuffer(info);
-		if (err < 0)
-			DRM_DEBUG_KMS("failed to unregister framebuffer\n");
-
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&fbdev->base);
+	drm_fb_helper_release_fbi(&fbdev->base);
 
 	if (fbdev->fb) {
 		drm_framebuffer_unregister_private(&fbdev->fb->base);
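drm_fb_helper_alloc_fbi() reports failure through an encoded error pointer rather than NULL, which is why the hunk above switches from a NULL check returning a hardcoded -ENOMEM to the IS_ERR()/PTR_ERR() pair. A simplified userspace mock of the include/linux/err.h idiom (the real kernel macros differ slightly):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

/* mock allocator standing in for drm_fb_helper_alloc_fbi() */
static void *alloc_fbi(int fail)
{
	static int fbi;

	if (fail)
		return ERR_PTR(-ENOMEM);	/* encoded error, not NULL */

	return &fbi;
}

int main(void)
{
	void *info = alloc_fbi(1);

	if (IS_ERR(info)) {
		/* the caller propagates the encoded errno */
		printf("allocation failed: %ld\n", PTR_ERR(info));
		return 1;
	}

	return 0;
}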
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index fe4008a7ddba..52b32cbd9de6 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -772,14 +772,8 @@ static bool tegra_output_is_hdmi(struct tegra_output *output)
 	return drm_detect_hdmi_monitor(edid);
 }
 
-static int tegra_hdmi_connector_dpms(struct drm_connector *connector,
-				     int mode)
-{
-	return 0;
-}
-
 static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
-	.dpms = tegra_hdmi_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = tegra_output_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -819,22 +813,27 @@ static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
 	.destroy = tegra_output_encoder_destroy,
 };
 
-static void tegra_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
 {
-}
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	u32 value;
 
-static void tegra_hdmi_encoder_prepare(struct drm_encoder *encoder)
-{
-}
+	/*
+	 * The following accesses registers of the display controller, so make
+	 * sure it's only executed when the output is attached to one.
+	 */
+	if (dc) {
+		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+		value &= ~HDMI_ENABLE;
+		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
 
-static void tegra_hdmi_encoder_commit(struct drm_encoder *encoder)
-{
+		tegra_dc_commit(dc);
+	}
 }
 
-static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
-					struct drm_display_mode *mode,
-					struct drm_display_mode *adjusted)
+static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
 {
+	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
 	unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
 	struct tegra_output *output = encoder_to_output(encoder);
 	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
@@ -873,13 +872,13 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
 
 	tegra_dc_writel(dc, VSYNC_H_POSITION(1),
 			DC_DISP_DISP_TIMING_OPTIONS);
-	tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+	tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE_888,
 			DC_DISP_DISP_COLOR_CONTROL);
 
 	/* video_preamble uses h_pulse2 */
 	pulse_start = 1 + h_sync_width + h_back_porch - 10;
 
-	tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+	tegra_dc_writel(dc, H_PULSE2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
 
 	value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
 		PULSE_LAST_END_A;
@@ -1036,24 +1035,6 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
 	/* TODO: add HDCP support */
 }
 
-static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
-{
-	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-	u32 value;
-
-	/*
-	 * The following accesses registers of the display controller, so make
-	 * sure it's only executed when the output is attached to one.
-	 */
-	if (dc) {
-		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
-		value &= ~HDMI_ENABLE;
-		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
-		tegra_dc_commit(dc);
-	}
-}
-
 static int
 tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
 			       struct drm_crtc_state *crtc_state,
@@ -1076,11 +1057,8 @@ tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
 }
 
 static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
-	.dpms = tegra_hdmi_encoder_dpms,
-	.prepare = tegra_hdmi_encoder_prepare,
-	.commit = tegra_hdmi_encoder_commit,
-	.mode_set = tegra_hdmi_encoder_mode_set,
 	.disable = tegra_hdmi_encoder_disable,
+	.enable = tegra_hdmi_encoder_enable,
 	.atomic_check = tegra_hdmi_encoder_atomic_check,
 };
 
@@ -1088,11 +1066,16 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct tegra_hdmi *hdmi = node->info_ent->data;
-	int err;
+	struct drm_crtc *crtc = hdmi->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
 
-	err = clk_prepare_enable(hdmi->clk);
-	if (err)
-		return err;
+	drm_modeset_lock_all(drm);
+
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
 
 #define DUMP_REG(name) \
 	seq_printf(s, "%-56s %#05x %08x\n", #name, name, \
@@ -1259,9 +1242,9 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
 
 #undef DUMP_REG
 
-	clk_disable_unprepare(hdmi->clk);
-
-	return 0;
+unlock:
+	drm_modeset_unlock_all(drm);
+	return err;
 }
 
 static struct drm_info_list debugfs_files[] = {
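The debugfs dumpers in this series (here and in the dsi.c and sor.c hunks) all move to the same guard: take the modeset locks, bail out with -EBUSY while no active CRTC drives the output, and leave through a single unlock label. A standalone sketch of that control flow with the locking mocked out:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool crtc_active = false;	/* pretend the pipe is shut down */

static void lock_all(void)   { printf("modeset locks taken\n"); }
static void unlock_all(void) { printf("modeset locks dropped\n"); }

static int show_regs(void)
{
	int err = 0;

	lock_all();

	/* registers are only safely readable while the output is driven */
	if (!crtc_active) {
		err = -EBUSY;
		goto unlock;
	}

	printf("dumping registers...\n");

unlock:
	unlock_all();
	return err;
}

int main(void)
{
	printf("show_regs() = %d\n", show_regs());
	return 0;
}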
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 37db47975d48..46664b622270 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -7,8 +7,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/of_gpio.h>
-
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_panel.h>
 #include "drm.h"
@@ -59,10 +57,17 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force)
 	enum drm_connector_status status = connector_status_unknown;
 
 	if (gpio_is_valid(output->hpd_gpio)) {
-		if (gpio_get_value(output->hpd_gpio) == 0)
-			status = connector_status_disconnected;
-		else
-			status = connector_status_connected;
+		if (output->hpd_gpio_flags & OF_GPIO_ACTIVE_LOW) {
+			if (gpio_get_value(output->hpd_gpio) != 0)
+				status = connector_status_disconnected;
+			else
+				status = connector_status_connected;
+		} else {
+			if (gpio_get_value(output->hpd_gpio) == 0)
+				status = connector_status_disconnected;
+			else
+				status = connector_status_connected;
+		}
 	} else {
 		if (!output->panel)
 			status = connector_status_disconnected;
@@ -97,7 +102,6 @@ static irqreturn_t hpd_irq(int irq, void *data)
 int tegra_output_probe(struct tegra_output *output)
 {
 	struct device_node *ddc, *panel;
-	enum of_gpio_flags flags;
 	int err, size;
 
 	if (!output->of_node)
@@ -128,7 +132,7 @@ int tegra_output_probe(struct tegra_output *output)
 
 	output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
 						   "nvidia,hpd-gpio", 0,
-						   &flags);
+						   &output->hpd_gpio_flags);
 	if (gpio_is_valid(output->hpd_gpio)) {
 		unsigned long flags;
 
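A standalone sketch of the polarity-aware hot-plug detection added above: an OF_GPIO_ACTIVE_LOW flag inverts the meaning of the raw GPIO level. The GPIO readout is mocked here; in the driver the flags come from of_get_named_gpio_flags():

#include <stdbool.h>
#include <stdio.h>

#define OF_GPIO_ACTIVE_LOW 0x1

static int gpio_level = 0;	/* pretend the pin currently reads low */

static bool hpd_connected(unsigned int flags)
{
	bool high = gpio_level != 0;

	/* active-low: a low level means the cable is plugged in */
	if (flags & OF_GPIO_ACTIVE_LOW)
		return !high;

	return high;
}

int main(void)
{
	printf("active-high: %s\n",
	       hpd_connected(0) ? "connected" : "disconnected");
	printf("active-low:  %s\n",
	       hpd_connected(OF_GPIO_ACTIVE_LOW) ? "connected" : "disconnected");
	return 0;
}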
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 9a99d213e1b1..bc9735b4ad60 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -18,7 +18,6 @@
 struct tegra_rgb {
 	struct tegra_output output;
 	struct tegra_dc *dc;
-	bool enabled;
 
 	struct clk *clk_parent;
 	struct clk *clk;
@@ -88,14 +87,8 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
 		tegra_dc_writel(dc, table[i].value, table[i].offset);
 }
 
-static int tegra_rgb_connector_dpms(struct drm_connector *connector,
-				    int mode)
-{
-	return 0;
-}
-
 static const struct drm_connector_funcs tegra_rgb_connector_funcs = {
-	.dpms = tegra_rgb_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = tegra_output_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -126,21 +119,22 @@ static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
 	.destroy = tegra_output_encoder_destroy,
 };
 
-static void tegra_rgb_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
 {
-}
+	struct tegra_output *output = encoder_to_output(encoder);
+	struct tegra_rgb *rgb = to_rgb(output);
 
-static void tegra_rgb_encoder_prepare(struct drm_encoder *encoder)
-{
-}
+	if (output->panel)
+		drm_panel_disable(output->panel);
 
-static void tegra_rgb_encoder_commit(struct drm_encoder *encoder)
-{
+	tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+	tegra_dc_commit(rgb->dc);
+
+	if (output->panel)
+		drm_panel_unprepare(output->panel);
 }
 
-static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder,
-				       struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted)
+static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
 {
 	struct tegra_output *output = encoder_to_output(encoder);
 	struct tegra_rgb *rgb = to_rgb(output);
@@ -175,21 +169,6 @@ static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder,
 	drm_panel_enable(output->panel);
 }
 
-static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
-{
-	struct tegra_output *output = encoder_to_output(encoder);
-	struct tegra_rgb *rgb = to_rgb(output);
-
-	if (output->panel)
-		drm_panel_disable(output->panel);
-
-	tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
-	tegra_dc_commit(rgb->dc);
-
-	if (output->panel)
-		drm_panel_unprepare(output->panel);
-}
-
 static int
 tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
 			       struct drm_crtc_state *crtc_state,
@@ -232,11 +211,8 @@ tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
 }
 
 static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = {
-	.dpms = tegra_rgb_encoder_dpms,
-	.prepare = tegra_rgb_encoder_prepare,
-	.commit = tegra_rgb_encoder_commit,
-	.mode_set = tegra_rgb_encoder_mode_set,
 	.disable = tegra_rgb_encoder_disable,
+	.enable = tegra_rgb_encoder_enable,
 	.atomic_check = tegra_rgb_encoder_atomic_check,
 };
 
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index ee8ad0d4a0f2..da1715ebdd71 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -10,7 +10,9 @@
 #include <linux/debugfs.h>
 #include <linux/gpio.h>
 #include <linux/io.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
 #include <linux/reset.h>
 
 #include <soc/tegra/pmc.h>
@@ -23,11 +25,146 @@
 #include "drm.h"
 #include "sor.h"
 
+#define SOR_REKEY 0x38
+
+struct tegra_sor_hdmi_settings {
+	unsigned long frequency;
+
+	u8 vcocap;
+	u8 ichpmp;
+	u8 loadadj;
+	u8 termadj;
+	u8 tx_pu;
+	u8 bg_vref;
+
+	u8 drive_current[4];
+	u8 preemphasis[4];
+};
+
+#if 1
+static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
+	{
+		.frequency = 54000000,
+		.vcocap = 0x0,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x10,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 75000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x40,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 150000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 300000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0xa,
+		.drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
+		.preemphasis = { 0x00, 0x17, 0x17, 0x17 },
+	}, {
+		.frequency = 600000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	},
+};
+#else
+static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
+	{
+		.frequency = 75000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x40,
+		.bg_vref = 0x8,
+		.drive_current = { 0x29, 0x29, 0x29, 0x29 },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 150000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0x8,
+		.drive_current = { 0x30, 0x37, 0x37, 0x37 },
+		.preemphasis = { 0x01, 0x02, 0x02, 0x02 },
+	}, {
+		.frequency = 300000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x6,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0xf,
+		.drive_current = { 0x30, 0x37, 0x37, 0x37 },
+		.preemphasis = { 0x10, 0x3e, 0x3e, 0x3e },
+	}, {
+		.frequency = 600000000,
+		.vcocap = 0x3,
+		.ichpmp = 0xa,
+		.loadadj = 0x3,
+		.termadj = 0xb,
+		.tx_pu = 0x66,
+		.bg_vref = 0xe,
+		.drive_current = { 0x35, 0x3e, 0x3e, 0x3e },
+		.preemphasis = { 0x02, 0x3f, 0x3f, 0x3f },
+	},
+};
+#endif
+
+struct tegra_sor_soc {
+	bool supports_edp;
+	bool supports_lvds;
+	bool supports_hdmi;
+	bool supports_dp;
+
+	const struct tegra_sor_hdmi_settings *settings;
+	unsigned int num_settings;
+};
+
+struct tegra_sor;
+
+struct tegra_sor_ops {
+	const char *name;
+	int (*probe)(struct tegra_sor *sor);
+	int (*remove)(struct tegra_sor *sor);
+};
+
 struct tegra_sor {
 	struct host1x_client client;
 	struct tegra_output output;
 	struct device *dev;
 
+	const struct tegra_sor_soc *soc;
 	void __iomem *regs;
 
 	struct reset_control *rst;
@@ -38,12 +175,19 @@ struct tegra_sor {
 
 	struct tegra_dpaux *dpaux;
 
-	struct mutex lock;
-	bool enabled;
-
 	struct drm_info_list *debugfs_files;
 	struct drm_minor *minor;
 	struct dentry *debugfs;
+
+	const struct tegra_sor_ops *ops;
+
+	/* for HDMI 2.0 */
+	struct tegra_sor_hdmi_settings *settings;
+	unsigned int num_settings;
+
+	struct regulator *avdd_io_supply;
+	struct regulator *vdd_pll_supply;
+	struct regulator *hdmi_supply;
 };
 
 struct tegra_sor_config {
@@ -94,40 +238,40 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
 		SOR_LANE_DRIVE_CURRENT_LANE2(0x40) |
 		SOR_LANE_DRIVE_CURRENT_LANE1(0x40) |
 		SOR_LANE_DRIVE_CURRENT_LANE0(0x40);
-	tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT_0);
+	tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
 
 	value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) |
 		SOR_LANE_PREEMPHASIS_LANE2(0x0f) |
 		SOR_LANE_PREEMPHASIS_LANE1(0x0f) |
 		SOR_LANE_PREEMPHASIS_LANE0(0x0f);
-	tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS_0);
+	tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
 
-	value = SOR_LANE_POST_CURSOR_LANE3(0x00) |
-		SOR_LANE_POST_CURSOR_LANE2(0x00) |
-		SOR_LANE_POST_CURSOR_LANE1(0x00) |
-		SOR_LANE_POST_CURSOR_LANE0(0x00);
-	tegra_sor_writel(sor, value, SOR_LANE_POST_CURSOR_0);
+	value = SOR_LANE_POSTCURSOR_LANE3(0x00) |
+		SOR_LANE_POSTCURSOR_LANE2(0x00) |
+		SOR_LANE_POSTCURSOR_LANE1(0x00) |
+		SOR_LANE_POSTCURSOR_LANE0(0x00);
+	tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0);
 
 	/* disable LVDS mode */
 	tegra_sor_writel(sor, 0, SOR_LVDS);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value |= SOR_DP_PADCTL_TX_PU_ENABLE;
 	value &= ~SOR_DP_PADCTL_TX_PU_MASK;
 	value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
 		 SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0;
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	usleep_range(10, 100);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
 		   SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	err = tegra_dpaux_prepare(sor->dpaux, DP_SET_ANSI_8B10B);
 	if (err < 0)
@@ -148,11 +292,11 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
 	if (err < 0)
 		return err;
 
-	value = tegra_sor_readl(sor, SOR_DP_SPARE_0);
+	value = tegra_sor_readl(sor, SOR_DP_SPARE0);
 	value |= SOR_DP_SPARE_SEQ_ENABLE;
 	value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
 	value |= SOR_DP_SPARE_MACRO_SOR_CLK;
-	tegra_sor_writel(sor, value, SOR_DP_SPARE_0);
+	tegra_sor_writel(sor, value, SOR_DP_SPARE0);
 
 	for (i = 0, value = 0; i < link->num_lanes; i++) {
 		unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
@@ -187,18 +331,59 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
 	return 0;
 }
 
+static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor)
+{
+	u32 mask = 0x08, adj = 0, value;
+
+	/* enable pad calibration logic */
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+	value = tegra_sor_readl(sor, SOR_PLL1);
+	value |= SOR_PLL1_TMDS_TERM;
+	tegra_sor_writel(sor, value, SOR_PLL1);
+
+	while (mask) {
+		adj |= mask;
+
+		value = tegra_sor_readl(sor, SOR_PLL1);
+		value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+		value |= SOR_PLL1_TMDS_TERMADJ(adj);
+		tegra_sor_writel(sor, value, SOR_PLL1);
+
+		usleep_range(100, 200);
+
+		value = tegra_sor_readl(sor, SOR_PLL1);
+		if (value & SOR_PLL1_TERM_COMPOUT)
+			adj &= ~mask;
+
+		mask >>= 1;
+	}
+
+	value = tegra_sor_readl(sor, SOR_PLL1);
+	value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+	value |= SOR_PLL1_TMDS_TERMADJ(adj);
+	tegra_sor_writel(sor, value, SOR_PLL1);
+
+	/* disable pad calibration logic */
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value |= SOR_DP_PADCTL_PAD_CAL_PD;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+}
+
 static void tegra_sor_super_update(struct tegra_sor *sor)
 {
-	tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
-	tegra_sor_writel(sor, 1, SOR_SUPER_STATE_0);
-	tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
+	tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
+	tegra_sor_writel(sor, 1, SOR_SUPER_STATE0);
+	tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
 }
 
 static void tegra_sor_update(struct tegra_sor *sor)
 {
-	tegra_sor_writel(sor, 0, SOR_STATE_0);
-	tegra_sor_writel(sor, 1, SOR_STATE_0);
-	tegra_sor_writel(sor, 0, SOR_STATE_0);
+	tegra_sor_writel(sor, 0, SOR_STATE0);
+	tegra_sor_writel(sor, 1, SOR_STATE0);
+	tegra_sor_writel(sor, 0, SOR_STATE0);
 }
 
 static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout)
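The new tegra_sor_dp_term_calibrate() above performs a successive-approximation search: starting at the most significant bit of the 4-bit TERMADJ field, each bit is tentatively set and kept only if the TERM_COMPOUT comparator does not trip. A standalone sketch of that search against a mocked comparator:

#include <stdint.h>
#include <stdio.h>

static const uint32_t target = 0x5;	/* pretend 0x5 is the ideal code */

/* mock of the SOR_PLL1_TERM_COMPOUT readback: trips once adj overshoots */
static int comparator_trips(uint32_t adj)
{
	return adj > target;
}

int main(void)
{
	uint32_t mask = 0x08, adj = 0;

	while (mask) {
		adj |= mask;		/* tentatively set this bit */

		if (comparator_trips(adj))
			adj &= ~mask;	/* overshoot: clear it again */

		mask >>= 1;
	}

	printf("calibrated termadj = %#x\n", adj);	/* prints 0x5 */
	return 0;
}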
@@ -235,16 +420,16 @@ static int tegra_sor_attach(struct tegra_sor *sor)
 	unsigned long value, timeout;
 
 	/* wake up in normal mode */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value |= SOR_SUPER_STATE_HEAD_MODE_AWAKE;
 	value |= SOR_SUPER_STATE_MODE_NORMAL;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	/* attach */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value |= SOR_SUPER_STATE_ATTACHED;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	timeout = jiffies + msecs_to_jiffies(250);
@@ -385,7 +570,7 @@ static int tegra_sor_compute_params(struct tegra_sor *sor,
 }
 
 static int tegra_sor_calc_config(struct tegra_sor *sor,
-				 struct drm_display_mode *mode,
+				 const struct drm_display_mode *mode,
 				 struct tegra_sor_config *config,
 				 struct drm_dp_link *link)
 {
@@ -481,9 +666,9 @@ static int tegra_sor_detach(struct tegra_sor *sor)
 	unsigned long value, timeout;
 
 	/* switch to safe mode */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value &= ~SOR_SUPER_STATE_MODE_NORMAL;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	timeout = jiffies + msecs_to_jiffies(250);
@@ -498,15 +683,15 @@ static int tegra_sor_detach(struct tegra_sor *sor)
 		return -ETIMEDOUT;
 
 	/* go to sleep */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	/* detach */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value &= ~SOR_SUPER_STATE_ATTACHED;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	timeout = jiffies + msecs_to_jiffies(250);
@@ -552,10 +737,10 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
 	if (err < 0)
 		dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
 		   SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	/* stop lane sequencer */
 	value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
@@ -575,39 +760,26 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
 	if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
 		return -ETIMEDOUT;
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value |= SOR_PLL_2_PORT_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value |= SOR_PLL2_PORT_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	usleep_range(20, 100);
 
-	value = tegra_sor_readl(sor, SOR_PLL_0);
-	value |= SOR_PLL_0_POWER_OFF;
-	value |= SOR_PLL_0_VCOPD;
-	tegra_sor_writel(sor, value, SOR_PLL_0);
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
+	tegra_sor_writel(sor, value, SOR_PLL0);
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value |= SOR_PLL_2_SEQ_PLLCAPPD;
-	value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value |= SOR_PLL2_SEQ_PLLCAPPD;
+	value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	usleep_range(20, 100);
 
 	return 0;
 }
 
-static int tegra_sor_crc_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-
-	return 0;
-}
-
-static int tegra_sor_crc_release(struct inode *inode, struct file *file)
-{
-	return 0;
-}
-
 static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
 {
 	u32 value;
@@ -615,8 +787,8 @@ static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
 	timeout = jiffies + msecs_to_jiffies(timeout);
 
 	while (time_before(jiffies, timeout)) {
-		value = tegra_sor_readl(sor, SOR_CRC_A);
-		if (value & SOR_CRC_A_VALID)
+		value = tegra_sor_readl(sor, SOR_CRCA);
+		if (value & SOR_CRCA_VALID)
 			return 0;
 
 		usleep_range(100, 200);
@@ -625,24 +797,25 @@ static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
 	return -ETIMEDOUT;
 }
 
-static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer,
-				  size_t size, loff_t *ppos)
+static int tegra_sor_show_crc(struct seq_file *s, void *data)
 {
-	struct tegra_sor *sor = file->private_data;
-	ssize_t num, err;
-	char buf[10];
+	struct drm_info_node *node = s->private;
+	struct tegra_sor *sor = node->info_ent->data;
+	struct drm_crtc *crtc = sor->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
 	u32 value;
 
-	mutex_lock(&sor->lock);
+	drm_modeset_lock_all(drm);
 
-	if (!sor->enabled) {
-		err = -EAGAIN;
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
 		goto unlock;
 	}
 
-	value = tegra_sor_readl(sor, SOR_STATE_1);
+	value = tegra_sor_readl(sor, SOR_STATE1);
 	value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
-	tegra_sor_writel(sor, value, SOR_STATE_1);
+	tegra_sor_writel(sor, value, SOR_STATE1);
 
 	value = tegra_sor_readl(sor, SOR_CRC_CNTRL);
 	value |= SOR_CRC_CNTRL_ENABLE;
@@ -656,65 +829,66 @@ static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer,
 	if (err < 0)
 		goto unlock;
 
-	tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A);
-	value = tegra_sor_readl(sor, SOR_CRC_B);
+	tegra_sor_writel(sor, SOR_CRCA_RESET, SOR_CRCA);
+	value = tegra_sor_readl(sor, SOR_CRCB);
 
-	num = scnprintf(buf, sizeof(buf), "%08x\n", value);
-
-	err = simple_read_from_buffer(buffer, size, ppos, buf, num);
+	seq_printf(s, "%08x\n", value);
 
 unlock:
-	mutex_unlock(&sor->lock);
+	drm_modeset_unlock_all(drm);
 	return err;
 }
 
-static const struct file_operations tegra_sor_crc_fops = {
-	.owner = THIS_MODULE,
-	.open = tegra_sor_crc_open,
-	.read = tegra_sor_crc_read,
-	.release = tegra_sor_crc_release,
-};
-
 static int tegra_sor_show_regs(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct tegra_sor *sor = node->info_ent->data;
+	struct drm_crtc *crtc = sor->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
+
+	drm_modeset_lock_all(drm);
+
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
 
 #define DUMP_REG(name) \
 	seq_printf(s, "%-38s %#05x %08x\n", #name, name, \
 		   tegra_sor_readl(sor, name))
 
 	DUMP_REG(SOR_CTXSW);
-	DUMP_REG(SOR_SUPER_STATE_0);
-	DUMP_REG(SOR_SUPER_STATE_1);
-	DUMP_REG(SOR_STATE_0);
-	DUMP_REG(SOR_STATE_1);
-	DUMP_REG(SOR_HEAD_STATE_0(0));
-	DUMP_REG(SOR_HEAD_STATE_0(1));
-	DUMP_REG(SOR_HEAD_STATE_1(0));
-	DUMP_REG(SOR_HEAD_STATE_1(1));
-	DUMP_REG(SOR_HEAD_STATE_2(0));
-	DUMP_REG(SOR_HEAD_STATE_2(1));
-	DUMP_REG(SOR_HEAD_STATE_3(0));
-	DUMP_REG(SOR_HEAD_STATE_3(1));
-	DUMP_REG(SOR_HEAD_STATE_4(0));
-	DUMP_REG(SOR_HEAD_STATE_4(1));
-	DUMP_REG(SOR_HEAD_STATE_5(0));
-	DUMP_REG(SOR_HEAD_STATE_5(1));
+	DUMP_REG(SOR_SUPER_STATE0);
+	DUMP_REG(SOR_SUPER_STATE1);
+	DUMP_REG(SOR_STATE0);
+	DUMP_REG(SOR_STATE1);
+	DUMP_REG(SOR_HEAD_STATE0(0));
+	DUMP_REG(SOR_HEAD_STATE0(1));
+	DUMP_REG(SOR_HEAD_STATE1(0));
+	DUMP_REG(SOR_HEAD_STATE1(1));
+	DUMP_REG(SOR_HEAD_STATE2(0));
+	DUMP_REG(SOR_HEAD_STATE2(1));
+	DUMP_REG(SOR_HEAD_STATE3(0));
+	DUMP_REG(SOR_HEAD_STATE3(1));
+	DUMP_REG(SOR_HEAD_STATE4(0));
+	DUMP_REG(SOR_HEAD_STATE4(1));
+	DUMP_REG(SOR_HEAD_STATE5(0));
+	DUMP_REG(SOR_HEAD_STATE5(1));
 	DUMP_REG(SOR_CRC_CNTRL);
 	DUMP_REG(SOR_DP_DEBUG_MVID);
 	DUMP_REG(SOR_CLK_CNTRL);
 	DUMP_REG(SOR_CAP);
 	DUMP_REG(SOR_PWR);
 	DUMP_REG(SOR_TEST);
-	DUMP_REG(SOR_PLL_0);
-	DUMP_REG(SOR_PLL_1);
-	DUMP_REG(SOR_PLL_2);
-	DUMP_REG(SOR_PLL_3);
+	DUMP_REG(SOR_PLL0);
+	DUMP_REG(SOR_PLL1);
+	DUMP_REG(SOR_PLL2);
+	DUMP_REG(SOR_PLL3);
 	DUMP_REG(SOR_CSTM);
 	DUMP_REG(SOR_LVDS);
-	DUMP_REG(SOR_CRC_A);
-	DUMP_REG(SOR_CRC_B);
+	DUMP_REG(SOR_CRCA);
+	DUMP_REG(SOR_CRCB);
 	DUMP_REG(SOR_BLANK);
 	DUMP_REG(SOR_SEQ_CTL);
 	DUMP_REG(SOR_LANE_SEQ_CTL);
@@ -736,86 +910,89 @@ static int tegra_sor_show_regs(struct seq_file *s, void *data)
 	DUMP_REG(SOR_SEQ_INST(15));
 	DUMP_REG(SOR_PWM_DIV);
 	DUMP_REG(SOR_PWM_CTL);
-	DUMP_REG(SOR_VCRC_A_0);
-	DUMP_REG(SOR_VCRC_A_1);
-	DUMP_REG(SOR_VCRC_B_0);
-	DUMP_REG(SOR_VCRC_B_1);
-	DUMP_REG(SOR_CCRC_A_0);
-	DUMP_REG(SOR_CCRC_A_1);
-	DUMP_REG(SOR_CCRC_B_0);
-	DUMP_REG(SOR_CCRC_B_1);
-	DUMP_REG(SOR_EDATA_A_0);
-	DUMP_REG(SOR_EDATA_A_1);
-	DUMP_REG(SOR_EDATA_B_0);
-	DUMP_REG(SOR_EDATA_B_1);
-	DUMP_REG(SOR_COUNT_A_0);
-	DUMP_REG(SOR_COUNT_A_1);
-	DUMP_REG(SOR_COUNT_B_0);
-	DUMP_REG(SOR_COUNT_B_1);
-	DUMP_REG(SOR_DEBUG_A_0);
-	DUMP_REG(SOR_DEBUG_A_1);
-	DUMP_REG(SOR_DEBUG_B_0);
-	DUMP_REG(SOR_DEBUG_B_1);
+	DUMP_REG(SOR_VCRC_A0);
+	DUMP_REG(SOR_VCRC_A1);
+	DUMP_REG(SOR_VCRC_B0);
+	DUMP_REG(SOR_VCRC_B1);
+	DUMP_REG(SOR_CCRC_A0);
+	DUMP_REG(SOR_CCRC_A1);
+	DUMP_REG(SOR_CCRC_B0);
+	DUMP_REG(SOR_CCRC_B1);
+	DUMP_REG(SOR_EDATA_A0);
+	DUMP_REG(SOR_EDATA_A1);
+	DUMP_REG(SOR_EDATA_B0);
+	DUMP_REG(SOR_EDATA_B1);
+	DUMP_REG(SOR_COUNT_A0);
+	DUMP_REG(SOR_COUNT_A1);
+	DUMP_REG(SOR_COUNT_B0);
+	DUMP_REG(SOR_COUNT_B1);
+	DUMP_REG(SOR_DEBUG_A0);
+	DUMP_REG(SOR_DEBUG_A1);
+	DUMP_REG(SOR_DEBUG_B0);
+	DUMP_REG(SOR_DEBUG_B1);
 	DUMP_REG(SOR_TRIG);
 	DUMP_REG(SOR_MSCHECK);
 	DUMP_REG(SOR_XBAR_CTRL);
 	DUMP_REG(SOR_XBAR_POL);
-	DUMP_REG(SOR_DP_LINKCTL_0);
-	DUMP_REG(SOR_DP_LINKCTL_1);
-	DUMP_REG(SOR_LANE_DRIVE_CURRENT_0);
-	DUMP_REG(SOR_LANE_DRIVE_CURRENT_1);
-	DUMP_REG(SOR_LANE4_DRIVE_CURRENT_0);
-	DUMP_REG(SOR_LANE4_DRIVE_CURRENT_1);
-	DUMP_REG(SOR_LANE_PREEMPHASIS_0);
-	DUMP_REG(SOR_LANE_PREEMPHASIS_1);
-	DUMP_REG(SOR_LANE4_PREEMPHASIS_0);
-	DUMP_REG(SOR_LANE4_PREEMPHASIS_1);
-	DUMP_REG(SOR_LANE_POST_CURSOR_0);
-	DUMP_REG(SOR_LANE_POST_CURSOR_1);
-	DUMP_REG(SOR_DP_CONFIG_0);
-	DUMP_REG(SOR_DP_CONFIG_1);
-	DUMP_REG(SOR_DP_MN_0);
-	DUMP_REG(SOR_DP_MN_1);
-	DUMP_REG(SOR_DP_PADCTL_0);
-	DUMP_REG(SOR_DP_PADCTL_1);
-	DUMP_REG(SOR_DP_DEBUG_0);
-	DUMP_REG(SOR_DP_DEBUG_1);
-	DUMP_REG(SOR_DP_SPARE_0);
-	DUMP_REG(SOR_DP_SPARE_1);
+	DUMP_REG(SOR_DP_LINKCTL0);
+	DUMP_REG(SOR_DP_LINKCTL1);
+	DUMP_REG(SOR_LANE_DRIVE_CURRENT0);
+	DUMP_REG(SOR_LANE_DRIVE_CURRENT1);
+	DUMP_REG(SOR_LANE4_DRIVE_CURRENT0);
+	DUMP_REG(SOR_LANE4_DRIVE_CURRENT1);
+	DUMP_REG(SOR_LANE_PREEMPHASIS0);
+	DUMP_REG(SOR_LANE_PREEMPHASIS1);
+	DUMP_REG(SOR_LANE4_PREEMPHASIS0);
+	DUMP_REG(SOR_LANE4_PREEMPHASIS1);
+	DUMP_REG(SOR_LANE_POSTCURSOR0);
+	DUMP_REG(SOR_LANE_POSTCURSOR1);
+	DUMP_REG(SOR_DP_CONFIG0);
+	DUMP_REG(SOR_DP_CONFIG1);
+	DUMP_REG(SOR_DP_MN0);
+	DUMP_REG(SOR_DP_MN1);
+	DUMP_REG(SOR_DP_PADCTL0);
+	DUMP_REG(SOR_DP_PADCTL1);
+	DUMP_REG(SOR_DP_DEBUG0);
+	DUMP_REG(SOR_DP_DEBUG1);
+	DUMP_REG(SOR_DP_SPARE0);
+	DUMP_REG(SOR_DP_SPARE1);
 	DUMP_REG(SOR_DP_AUDIO_CTRL);
 	DUMP_REG(SOR_DP_AUDIO_HBLANK_SYMBOLS);
 	DUMP_REG(SOR_DP_AUDIO_VBLANK_SYMBOLS);
 	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_HEADER);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_0);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_1);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_2);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_3);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_4);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_5);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_6);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK0);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK1);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK2);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK3);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK4);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK5);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK6);
 	DUMP_REG(SOR_DP_TPG);
 	DUMP_REG(SOR_DP_TPG_CONFIG);
-	DUMP_REG(SOR_DP_LQ_CSTM_0);
-	DUMP_REG(SOR_DP_LQ_CSTM_1);
-	DUMP_REG(SOR_DP_LQ_CSTM_2);
+	DUMP_REG(SOR_DP_LQ_CSTM0);
+	DUMP_REG(SOR_DP_LQ_CSTM1);
+	DUMP_REG(SOR_DP_LQ_CSTM2);
 
 #undef DUMP_REG
 
-	return 0;
+unlock:
+	drm_modeset_unlock_all(drm);
+	return err;
 }
 
 static const struct drm_info_list debugfs_files[] = {
+	{ "crc", tegra_sor_show_crc, 0, NULL },
808 { "regs", tegra_sor_show_regs, 0, NULL }, 985 { "regs", tegra_sor_show_regs, 0, NULL },
809}; 986};
810 987
811static int tegra_sor_debugfs_init(struct tegra_sor *sor, 988static int tegra_sor_debugfs_init(struct tegra_sor *sor,
812 struct drm_minor *minor) 989 struct drm_minor *minor)
813{ 990{
814 struct dentry *entry; 991 const char *name = sor->soc->supports_dp ? "sor1" : "sor";
815 unsigned int i; 992 unsigned int i;
816 int err = 0; 993 int err;
817 994
818 sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root); 995 sor->debugfs = debugfs_create_dir(name, minor->debugfs_root);
819 if (!sor->debugfs) 996 if (!sor->debugfs)
820 return -ENOMEM; 997 return -ENOMEM;
821 998
@@ -835,14 +1012,9 @@ static int tegra_sor_debugfs_init(struct tegra_sor *sor,
835 if (err < 0) 1012 if (err < 0)
836 goto free; 1013 goto free;
837 1014
838 entry = debugfs_create_file("crc", 0644, sor->debugfs, sor, 1015 sor->minor = minor;
839 &tegra_sor_crc_fops);
840 if (!entry) {
841 err = -ENOMEM;
842 goto free;
843 }
844 1016
845 return err; 1017 return 0;
846 1018
847free: 1019free:
848 kfree(sor->debugfs_files); 1020 kfree(sor->debugfs_files);
@@ -860,15 +1032,10 @@ static void tegra_sor_debugfs_exit(struct tegra_sor *sor)
860 sor->minor = NULL; 1032 sor->minor = NULL;
861 1033
862 kfree(sor->debugfs_files); 1034 kfree(sor->debugfs_files);
863 sor->debugfs = NULL;
864
865 debugfs_remove_recursive(sor->debugfs);
866 sor->debugfs_files = NULL; 1035 sor->debugfs_files = NULL;
867}
868 1036
869static int tegra_sor_connector_dpms(struct drm_connector *connector, int mode) 1037 debugfs_remove_recursive(sor->debugfs);
870{ 1038 sor->debugfs = NULL;
871 return 0;
872} 1039}
873 1040
874static enum drm_connector_status 1041static enum drm_connector_status
@@ -880,11 +1047,11 @@ tegra_sor_connector_detect(struct drm_connector *connector, bool force)
880 if (sor->dpaux) 1047 if (sor->dpaux)
881 return tegra_dpaux_detect(sor->dpaux); 1048 return tegra_dpaux_detect(sor->dpaux);
882 1049
883 return connector_status_unknown; 1050 return tegra_output_connector_detect(connector, force);
884} 1051}
885 1052
886static const struct drm_connector_funcs tegra_sor_connector_funcs = { 1053static const struct drm_connector_funcs tegra_sor_connector_funcs = {
887 .dpms = tegra_sor_connector_dpms, 1054 .dpms = drm_atomic_helper_connector_dpms,
888 .reset = drm_atomic_helper_connector_reset, 1055 .reset = drm_atomic_helper_connector_reset,
889 .detect = tegra_sor_connector_detect, 1056 .detect = tegra_sor_connector_detect,
890 .fill_modes = drm_helper_probe_single_connector_modes, 1057 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -927,22 +1094,102 @@ static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
927 .destroy = tegra_output_encoder_destroy, 1094 .destroy = tegra_output_encoder_destroy,
928}; 1095};
929 1096
930static void tegra_sor_encoder_dpms(struct drm_encoder *encoder, int mode) 1097static void tegra_sor_edp_disable(struct drm_encoder *encoder)
931{ 1098{
932} 1099 struct tegra_output *output = encoder_to_output(encoder);
1100 struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
1101 struct tegra_sor *sor = to_sor(output);
1102 u32 value;
1103 int err;
933 1104
934static void tegra_sor_encoder_prepare(struct drm_encoder *encoder) 1105 if (output->panel)
935{ 1106 drm_panel_disable(output->panel);
1107
1108 err = tegra_sor_detach(sor);
1109 if (err < 0)
1110 dev_err(sor->dev, "failed to detach SOR: %d\n", err);
1111
1112 tegra_sor_writel(sor, 0, SOR_STATE1);
1113 tegra_sor_update(sor);
1114
1115 /*
1116 * The following accesses registers of the display controller, so make
1117 * sure it's only executed when the output is attached to one.
1118 */
1119 if (dc) {
1120 value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
1121 value &= ~SOR_ENABLE;
1122 tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
1123
1124 tegra_dc_commit(dc);
1125 }
1126
1127 err = tegra_sor_power_down(sor);
1128 if (err < 0)
1129 dev_err(sor->dev, "failed to power down SOR: %d\n", err);
1130
1131 if (sor->dpaux) {
1132 err = tegra_dpaux_disable(sor->dpaux);
1133 if (err < 0)
1134 dev_err(sor->dev, "failed to disable DP: %d\n", err);
1135 }
1136
1137 err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
1138 if (err < 0)
1139 dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
1140
1141 if (output->panel)
1142 drm_panel_unprepare(output->panel);
1143
1144 reset_control_assert(sor->rst);
1145 clk_disable_unprepare(sor->clk);
936} 1146}
937 1147
938static void tegra_sor_encoder_commit(struct drm_encoder *encoder) 1148#if 0
1149static int calc_h_ref_to_sync(const struct drm_display_mode *mode,
1150 unsigned int *value)
939{ 1151{
1152 unsigned int hfp, hsw, hbp, a = 0, b;
1153
1154 hfp = mode->hsync_start - mode->hdisplay;
1155 hsw = mode->hsync_end - mode->hsync_start;
1156 hbp = mode->htotal - mode->hsync_end;
1157
1158 pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp);
1159
1160 b = hfp - 1;
1161
1162 pr_info("a: %u, b: %u\n", a, b);
1163 pr_info("a + hsw + hbp = %u\n", a + hsw + hbp);
1164
1165 if (a + hsw + hbp <= 11) {
1166 a = 1 + 11 - hsw - hbp;
1167 pr_info("a: %u\n", a);
1168 }
1169
1170 if (a > b)
1171 return -EINVAL;
1172
1173 if (hsw < 1)
1174 return -EINVAL;
1175
1176 if (mode->hdisplay < 16)
1177 return -EINVAL;
1178
1179 if (value) {
1180 if (b > a && a % 2)
1181 *value = a + 1;
1182 else
1183 *value = a;
1184 }
1185
1186 return 0;
940} 1187}
1188#endif
941 1189
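The #if 0 block above is compiled out, but the arithmetic is easier to check
with concrete numbers. A sketch assuming CEA-861 1080p timings (hsync_start =
2008, hsync_end = 2052, htotal = 2200; none of these values come from the
patch itself):

    unsigned int hfp = 2008 - 1920;     /*  88: front porch */
    unsigned int hsw = 2052 - 2008;     /*  44: sync width  */
    unsigned int hbp = 2200 - 2052;     /* 148: back porch  */
    unsigned int a = 0, b = hfp - 1;    /* b = 87 */

    /* a + hsw + hbp = 192 > 11, so the minimum-delay fixup is skipped;
     * a <= b, hsw >= 1 and hdisplay >= 16 all hold, and since a is even
     * the function would report h_ref_to_sync = 0. */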
942static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, 1190static void tegra_sor_edp_enable(struct drm_encoder *encoder)
943 struct drm_display_mode *mode,
944 struct drm_display_mode *adjusted)
945{ 1191{
1192 struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
946 struct tegra_output *output = encoder_to_output(encoder); 1193 struct tegra_output *output = encoder_to_output(encoder);
947 struct tegra_dc *dc = to_tegra_dc(encoder->crtc); 1194 struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
948 unsigned int vbe, vse, hbe, hse, vbs, hbs, i; 1195 unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
@@ -953,14 +1200,9 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
953 int err = 0; 1200 int err = 0;
954 u32 value; 1201 u32 value;
955 1202
956 mutex_lock(&sor->lock);
957
958 if (sor->enabled)
959 goto unlock;
960
961 err = clk_prepare_enable(sor->clk); 1203 err = clk_prepare_enable(sor->clk);
962 if (err < 0) 1204 if (err < 0)
963 goto unlock; 1205 dev_err(sor->dev, "failed to enable clock: %d\n", err);
964 1206
965 reset_control_deassert(sor->rst); 1207 reset_control_deassert(sor->rst);
966 1208
@@ -979,7 +1221,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
979 if (err < 0) { 1221 if (err < 0) {
980 dev_err(sor->dev, "failed to probe eDP link: %d\n", 1222 dev_err(sor->dev, "failed to probe eDP link: %d\n",
981 err); 1223 err);
982 goto unlock; 1224 return;
983 } 1225 }
984 } 1226 }
985 1227
@@ -1000,40 +1242,40 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
1000 value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK; 1242 value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
1001 tegra_sor_writel(sor, value, SOR_CLK_CNTRL); 1243 tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
1002 1244
1003 value = tegra_sor_readl(sor, SOR_PLL_2); 1245 value = tegra_sor_readl(sor, SOR_PLL2);
1004 value &= ~SOR_PLL_2_BANDGAP_POWERDOWN; 1246 value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
1005 tegra_sor_writel(sor, value, SOR_PLL_2); 1247 tegra_sor_writel(sor, value, SOR_PLL2);
1006 usleep_range(20, 100); 1248 usleep_range(20, 100);
1007 1249
1008 value = tegra_sor_readl(sor, SOR_PLL_3); 1250 value = tegra_sor_readl(sor, SOR_PLL3);
1009 value |= SOR_PLL_3_PLL_VDD_MODE_V3_3; 1251 value |= SOR_PLL3_PLL_VDD_MODE_3V3;
1010 tegra_sor_writel(sor, value, SOR_PLL_3); 1252 tegra_sor_writel(sor, value, SOR_PLL3);
1011 1253
1012 value = SOR_PLL_0_ICHPMP(0xf) | SOR_PLL_0_VCOCAP_RST | 1254 value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST |
1013 SOR_PLL_0_PLLREG_LEVEL_V45 | SOR_PLL_0_RESISTOR_EXT; 1255 SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT;
1014 tegra_sor_writel(sor, value, SOR_PLL_0); 1256 tegra_sor_writel(sor, value, SOR_PLL0);
1015 1257
1016 value = tegra_sor_readl(sor, SOR_PLL_2); 1258 value = tegra_sor_readl(sor, SOR_PLL2);
1017 value |= SOR_PLL_2_SEQ_PLLCAPPD; 1259 value |= SOR_PLL2_SEQ_PLLCAPPD;
1018 value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE; 1260 value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
1019 value |= SOR_PLL_2_LVDS_ENABLE; 1261 value |= SOR_PLL2_LVDS_ENABLE;
1020 tegra_sor_writel(sor, value, SOR_PLL_2); 1262 tegra_sor_writel(sor, value, SOR_PLL2);
1021 1263
1022 value = SOR_PLL_1_TERM_COMPOUT | SOR_PLL_1_TMDS_TERM; 1264 value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM;
1023 tegra_sor_writel(sor, value, SOR_PLL_1); 1265 tegra_sor_writel(sor, value, SOR_PLL1);
1024 1266
1025 while (true) { 1267 while (true) {
1026 value = tegra_sor_readl(sor, SOR_PLL_2); 1268 value = tegra_sor_readl(sor, SOR_PLL2);
1027 if ((value & SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE) == 0) 1269 if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0)
1028 break; 1270 break;
1029 1271
1030 usleep_range(250, 1000); 1272 usleep_range(250, 1000);
1031 } 1273 }
1032 1274
1033 value = tegra_sor_readl(sor, SOR_PLL_2); 1275 value = tegra_sor_readl(sor, SOR_PLL2);
1034 value &= ~SOR_PLL_2_POWERDOWN_OVERRIDE; 1276 value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
1035 value &= ~SOR_PLL_2_PORT_POWERDOWN; 1277 value &= ~SOR_PLL2_PORT_POWERDOWN;
1036 tegra_sor_writel(sor, value, SOR_PLL_2); 1278 tegra_sor_writel(sor, value, SOR_PLL2);
1037 1279
1038 /* 1280 /*
1039 * power up 1281 * power up
@@ -1046,51 +1288,49 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
1046 tegra_sor_writel(sor, value, SOR_CLK_CNTRL); 1288 tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
1047 1289
1048 /* step 1 */ 1290 /* step 1 */
1049 value = tegra_sor_readl(sor, SOR_PLL_2); 1291 value = tegra_sor_readl(sor, SOR_PLL2);
1050 value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL_2_PORT_POWERDOWN | 1292 value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN |
1051 SOR_PLL_2_BANDGAP_POWERDOWN; 1293 SOR_PLL2_BANDGAP_POWERDOWN;
1052 tegra_sor_writel(sor, value, SOR_PLL_2); 1294 tegra_sor_writel(sor, value, SOR_PLL2);
1053 1295
1054 value = tegra_sor_readl(sor, SOR_PLL_0); 1296 value = tegra_sor_readl(sor, SOR_PLL0);
1055 value |= SOR_PLL_0_VCOPD | SOR_PLL_0_POWER_OFF; 1297 value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
1056 tegra_sor_writel(sor, value, SOR_PLL_0); 1298 tegra_sor_writel(sor, value, SOR_PLL0);
1057 1299
1058 value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); 1300 value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
1059 value &= ~SOR_DP_PADCTL_PAD_CAL_PD; 1301 value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
1060 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); 1302 tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
1061 1303
1062 /* step 2 */ 1304 /* step 2 */
1063 err = tegra_io_rail_power_on(TEGRA_IO_RAIL_LVDS); 1305 err = tegra_io_rail_power_on(TEGRA_IO_RAIL_LVDS);
1064 if (err < 0) { 1306 if (err < 0)
1065 dev_err(sor->dev, "failed to power on I/O rail: %d\n", err); 1307 dev_err(sor->dev, "failed to power on I/O rail: %d\n", err);
1066 goto unlock;
1067 }
1068 1308
1069 usleep_range(5, 100); 1309 usleep_range(5, 100);
1070 1310
1071 /* step 3 */ 1311 /* step 3 */
1072 value = tegra_sor_readl(sor, SOR_PLL_2); 1312 value = tegra_sor_readl(sor, SOR_PLL2);
1073 value &= ~SOR_PLL_2_BANDGAP_POWERDOWN; 1313 value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
1074 tegra_sor_writel(sor, value, SOR_PLL_2); 1314 tegra_sor_writel(sor, value, SOR_PLL2);
1075 1315
1076 usleep_range(20, 100); 1316 usleep_range(20, 100);
1077 1317
1078 /* step 4 */ 1318 /* step 4 */
1079 value = tegra_sor_readl(sor, SOR_PLL_0); 1319 value = tegra_sor_readl(sor, SOR_PLL0);
1080 value &= ~SOR_PLL_0_POWER_OFF; 1320 value &= ~SOR_PLL0_VCOPD;
1081 value &= ~SOR_PLL_0_VCOPD; 1321 value &= ~SOR_PLL0_PWR;
1082 tegra_sor_writel(sor, value, SOR_PLL_0); 1322 tegra_sor_writel(sor, value, SOR_PLL0);
1083 1323
1084 value = tegra_sor_readl(sor, SOR_PLL_2); 1324 value = tegra_sor_readl(sor, SOR_PLL2);
1085 value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE; 1325 value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
1086 tegra_sor_writel(sor, value, SOR_PLL_2); 1326 tegra_sor_writel(sor, value, SOR_PLL2);
1087 1327
1088 usleep_range(200, 1000); 1328 usleep_range(200, 1000);
1089 1329
1090 /* step 5 */ 1330 /* step 5 */
1091 value = tegra_sor_readl(sor, SOR_PLL_2); 1331 value = tegra_sor_readl(sor, SOR_PLL2);
1092 value &= ~SOR_PLL_2_PORT_POWERDOWN; 1332 value &= ~SOR_PLL2_PORT_POWERDOWN;
1093 tegra_sor_writel(sor, value, SOR_PLL_2); 1333 tegra_sor_writel(sor, value, SOR_PLL2);
1094 1334
1095 /* switch to DP clock */ 1335 /* switch to DP clock */
1096 err = clk_set_parent(sor->clk, sor->clk_dp); 1336 err = clk_set_parent(sor->clk, sor->clk_dp);
@@ -1098,7 +1338,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
1098 dev_err(sor->dev, "failed to set DP parent clock: %d\n", err); 1338 dev_err(sor->dev, "failed to set DP parent clock: %d\n", err);
1099 1339
1100 /* power DP lanes */ 1340 /* power DP lanes */
1101 value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); 1341 value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
1102 1342
1103 if (link.num_lanes <= 2) 1343 if (link.num_lanes <= 2)
1104 value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2); 1344 value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2);
@@ -1115,12 +1355,12 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
1115 else 1355 else
1116 value |= SOR_DP_PADCTL_PD_TXD_0; 1356 value |= SOR_DP_PADCTL_PD_TXD_0;
1117 1357
1118 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); 1358 tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
1119 1359
1120 value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0); 1360 value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
1121 value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; 1361 value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
1122 value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes); 1362 value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes);
1123 tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0); 1363 tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
1124 1364
1125 /* start lane sequencer */ 1365 /* start lane sequencer */
1126 value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN | 1366 value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
@@ -1142,14 +1382,14 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
1142 tegra_sor_writel(sor, value, SOR_CLK_CNTRL); 1382 tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
1143 1383
1144 /* set linkctl */ 1384 /* set linkctl */
1145 value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0); 1385 value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
1146 value |= SOR_DP_LINKCTL_ENABLE; 1386 value |= SOR_DP_LINKCTL_ENABLE;
1147 1387
1148 value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK; 1388 value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
1149 value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size); 1389 value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size);
1150 1390
1151 value |= SOR_DP_LINKCTL_ENHANCED_FRAME; 1391 value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
1152 tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0); 1392 tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
1153 1393
1154 for (i = 0, value = 0; i < 4; i++) { 1394 for (i = 0, value = 0; i < 4; i++) {
1155 unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | 1395 unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
@@ -1160,7 +1400,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
1160 1400
1161 tegra_sor_writel(sor, value, SOR_DP_TPG); 1401 tegra_sor_writel(sor, value, SOR_DP_TPG);
1162 1402
1163 value = tegra_sor_readl(sor, SOR_DP_CONFIG_0); 1403 value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
1164 value &= ~SOR_DP_CONFIG_WATERMARK_MASK; 1404 value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
1165 value |= SOR_DP_CONFIG_WATERMARK(config.watermark); 1405 value |= SOR_DP_CONFIG_WATERMARK(config.watermark);
1166 1406
@@ -1177,7 +1417,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
1177 1417
1178 value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE; 1418 value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
1179 value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE; 1419 value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
1180 tegra_sor_writel(sor, value, SOR_DP_CONFIG_0); 1420 tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
1181 1421
1182 value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS); 1422 value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
1183 value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK; 1423 value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
@@ -1190,33 +1430,27 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
1190 tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS); 1430 tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
1191 1431
1192 /* enable pad calibration logic */ 1432 /* enable pad calibration logic */
1193 value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); 1433 value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
1194 value |= SOR_DP_PADCTL_PAD_CAL_PD; 1434 value |= SOR_DP_PADCTL_PAD_CAL_PD;
1195 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); 1435 tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
1196 1436
1197 if (sor->dpaux) { 1437 if (sor->dpaux) {
1198 u8 rate, lanes; 1438 u8 rate, lanes;
1199 1439
1200 err = drm_dp_link_probe(aux, &link); 1440 err = drm_dp_link_probe(aux, &link);
1201 if (err < 0) { 1441 if (err < 0)
1202 dev_err(sor->dev, "failed to probe eDP link: %d\n", 1442 dev_err(sor->dev, "failed to probe eDP link: %d\n",
1203 err); 1443 err);
1204 goto unlock;
1205 }
1206 1444
1207 err = drm_dp_link_power_up(aux, &link); 1445 err = drm_dp_link_power_up(aux, &link);
1208 if (err < 0) { 1446 if (err < 0)
1209 dev_err(sor->dev, "failed to power up eDP link: %d\n", 1447 dev_err(sor->dev, "failed to power up eDP link: %d\n",
1210 err); 1448 err);
1211 goto unlock;
1212 }
1213 1449
1214 err = drm_dp_link_configure(aux, &link); 1450 err = drm_dp_link_configure(aux, &link);
1215 if (err < 0) { 1451 if (err < 0)
1216 dev_err(sor->dev, "failed to configure eDP link: %d\n", 1452 dev_err(sor->dev, "failed to configure eDP link: %d\n",
1217 err); 1453 err);
1218 goto unlock;
1219 }
1220 1454
1221 rate = drm_dp_link_rate_to_bw_code(link.rate); 1455 rate = drm_dp_link_rate_to_bw_code(link.rate);
1222 lanes = link.num_lanes; 1456 lanes = link.num_lanes;
@@ -1226,14 +1460,14 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
1226 value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate); 1460 value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
1227 tegra_sor_writel(sor, value, SOR_CLK_CNTRL); 1461 tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
1228 1462
1229 value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0); 1463 value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
1230 value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; 1464 value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
1231 value |= SOR_DP_LINKCTL_LANE_COUNT(lanes); 1465 value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
1232 1466
1233 if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) 1467 if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
1234 value |= SOR_DP_LINKCTL_ENHANCED_FRAME; 1468 value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
1235 1469
1236 tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0); 1470 tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
1237 1471
1238 /* disable training pattern generator */ 1472 /* disable training pattern generator */
1239 1473
@@ -1250,17 +1484,14 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
1250 if (err < 0) { 1484 if (err < 0) {
1251 dev_err(sor->dev, "DP fast link training failed: %d\n", 1485 dev_err(sor->dev, "DP fast link training failed: %d\n",
1252 err); 1486 err);
1253 goto unlock;
1254 } 1487 }
1255 1488
1256 dev_dbg(sor->dev, "fast link training succeeded\n"); 1489 dev_dbg(sor->dev, "fast link training succeeded\n");
1257 } 1490 }
1258 1491
1259 err = tegra_sor_power_up(sor, 250); 1492 err = tegra_sor_power_up(sor, 250);
1260 if (err < 0) { 1493 if (err < 0)
1261 dev_err(sor->dev, "failed to power up SOR: %d\n", err); 1494 dev_err(sor->dev, "failed to power up SOR: %d\n", err);
1262 goto unlock;
1263 }
1264 1495
1265 /* 1496 /*
1266 * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete 1497 * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
@@ -1296,7 +1527,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
1296 break; 1527 break;
1297 } 1528 }
1298 1529
1299 tegra_sor_writel(sor, value, SOR_STATE_1); 1530 tegra_sor_writel(sor, value, SOR_STATE1);
1300 1531
1301 /* 1532 /*
1302 * TODO: The video timing programming below doesn't seem to match the 1533 * TODO: The video timing programming below doesn't seem to match the
@@ -1304,25 +1535,27 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
1304 */ 1535 */
1305 1536
1306 value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff); 1537 value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
1307 tegra_sor_writel(sor, value, SOR_HEAD_STATE_1(0)); 1538 tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
1308 1539
1309 vse = mode->vsync_end - mode->vsync_start - 1; 1540 vse = mode->vsync_end - mode->vsync_start - 1;
1310 hse = mode->hsync_end - mode->hsync_start - 1; 1541 hse = mode->hsync_end - mode->hsync_start - 1;
1311 1542
1312 value = ((vse & 0x7fff) << 16) | (hse & 0x7fff); 1543 value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
1313 tegra_sor_writel(sor, value, SOR_HEAD_STATE_2(0)); 1544 tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
1314 1545
1315 vbe = vse + (mode->vsync_start - mode->vdisplay); 1546 vbe = vse + (mode->vsync_start - mode->vdisplay);
1316 hbe = hse + (mode->hsync_start - mode->hdisplay); 1547 hbe = hse + (mode->hsync_start - mode->hdisplay);
1317 1548
1318 value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff); 1549 value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
1319 tegra_sor_writel(sor, value, SOR_HEAD_STATE_3(0)); 1550 tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
1320 1551
1321 vbs = vbe + mode->vdisplay; 1552 vbs = vbe + mode->vdisplay;
1322 hbs = hbe + mode->hdisplay; 1553 hbs = hbe + mode->hdisplay;
1323 1554
1324 value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff); 1555 value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
1325 tegra_sor_writel(sor, value, SOR_HEAD_STATE_4(0)); 1556 tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
1557
1558 tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
1326 1559
1327 /* CSTM (LVDS, link A/B, upper) */ 1560 /* CSTM (LVDS, link A/B, upper) */
1328 value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B | 1561 value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
@@ -1331,10 +1564,8 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
1331 1564
1332 /* PWM setup */ 1565 /* PWM setup */
1333 err = tegra_sor_setup_pwm(sor, 250); 1566 err = tegra_sor_setup_pwm(sor, 250);
1334 if (err < 0) { 1567 if (err < 0)
1335 dev_err(sor->dev, "failed to setup PWM: %d\n", err); 1568 dev_err(sor->dev, "failed to setup PWM: %d\n", err);
1336 goto unlock;
1337 }
1338 1569
1339 tegra_sor_update(sor); 1570 tegra_sor_update(sor);
1340 1571
@@ -1345,147 +1576,610 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
1345 tegra_dc_commit(dc); 1576 tegra_dc_commit(dc);
1346 1577
1347 err = tegra_sor_attach(sor); 1578 err = tegra_sor_attach(sor);
1348 if (err < 0) { 1579 if (err < 0)
1349 dev_err(sor->dev, "failed to attach SOR: %d\n", err); 1580 dev_err(sor->dev, "failed to attach SOR: %d\n", err);
1350 goto unlock;
1351 }
1352 1581
1353 err = tegra_sor_wakeup(sor); 1582 err = tegra_sor_wakeup(sor);
1354 if (err < 0) { 1583 if (err < 0)
1355 dev_err(sor->dev, "failed to enable DC: %d\n", err); 1584 dev_err(sor->dev, "failed to enable DC: %d\n", err);
1356 goto unlock;
1357 }
1358 1585
1359 if (output->panel) 1586 if (output->panel)
1360 drm_panel_enable(output->panel); 1587 drm_panel_enable(output->panel);
1361
1362 sor->enabled = true;
1363
1364unlock:
1365 mutex_unlock(&sor->lock);
1366} 1588}
1367 1589
1368static void tegra_sor_encoder_disable(struct drm_encoder *encoder) 1590static int
1591tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
1592 struct drm_crtc_state *crtc_state,
1593 struct drm_connector_state *conn_state)
1369{ 1594{
1370 struct tegra_output *output = encoder_to_output(encoder); 1595 struct tegra_output *output = encoder_to_output(encoder);
1371 struct tegra_dc *dc = to_tegra_dc(encoder->crtc); 1596 struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
1597 unsigned long pclk = crtc_state->mode.clock * 1000;
1372 struct tegra_sor *sor = to_sor(output); 1598 struct tegra_sor *sor = to_sor(output);
1373 u32 value;
1374 int err; 1599 int err;
1375 1600
1376 mutex_lock(&sor->lock); 1601 err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
1602 pclk, 0);
1603 if (err < 0) {
1604 dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
1605 return err;
1606 }
1377 1607
1378 if (!sor->enabled) 1608 return 0;
1379 goto unlock; 1609}
1380 1610
1381 if (output->panel) 1611static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = {
1382 drm_panel_disable(output->panel); 1612 .disable = tegra_sor_edp_disable,
1613 .enable = tegra_sor_edp_enable,
1614 .atomic_check = tegra_sor_encoder_atomic_check,
1615};
1383 1616
1384 err = tegra_sor_detach(sor); 1617static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size)
1385 if (err < 0) { 1618{
1386 dev_err(sor->dev, "failed to detach SOR: %d\n", err); 1619 u32 value = 0;
1387 goto unlock; 1620 size_t i;
1621
1622 for (i = size; i > 0; i--)
1623 value = (value << 8) | ptr[i - 1];
1624
1625 return value;
1626}
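tegra_sor_hdmi_subpack() packs up to four infoframe bytes into a register
word least-significant byte first. A minimal sketch of the byte order, with
hypothetical payload bytes:

    static const u8 bytes[] = { 0x12, 0x34, 0x56 };

    /* the loop runs i = 3, 2, 1: value = 0x56, 0x5634, 0x563412 */
    u32 value = tegra_sor_hdmi_subpack(bytes, sizeof(bytes));
    /* value == 0x00563412: bytes[0] lands in bits 7:0, and the missing
     * fourth byte is implicitly zero */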
1627
1628static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
1629 const void *data, size_t size)
1630{
1631 const u8 *ptr = data;
1632 unsigned long offset;
1633 size_t i, j;
1634 u32 value;
1635
1636 switch (ptr[0]) {
1637 case HDMI_INFOFRAME_TYPE_AVI:
1638 offset = SOR_HDMI_AVI_INFOFRAME_HEADER;
1639 break;
1640
1641 case HDMI_INFOFRAME_TYPE_AUDIO:
1642 offset = SOR_HDMI_AUDIO_INFOFRAME_HEADER;
1643 break;
1644
1645 case HDMI_INFOFRAME_TYPE_VENDOR:
1646 offset = SOR_HDMI_VSI_INFOFRAME_HEADER;
1647 break;
1648
1649 default:
1650 dev_err(sor->dev, "unsupported infoframe type: %02x\n",
1651 ptr[0]);
1652 return;
1388 } 1653 }
1389 1654
1390 tegra_sor_writel(sor, 0, SOR_STATE_1); 1655 value = INFOFRAME_HEADER_TYPE(ptr[0]) |
1391 tegra_sor_update(sor); 1656 INFOFRAME_HEADER_VERSION(ptr[1]) |
1657 INFOFRAME_HEADER_LEN(ptr[2]);
1658 tegra_sor_writel(sor, value, offset);
1659 offset++;
1392 1660
1393 /* 1661 /*
1394 * The following accesses registers of the display controller, so make 1662 * Each subpack contains 7 bytes, divided into:
1395 * sure it's only executed when the output is attached to one. 1663 * - subpack_low: bytes 0 - 3
1664 * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
1396 */ 1665 */
1397 if (dc) { 1666 for (i = 3, j = 0; i < size; i += 7, j += 8) {
1398 value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); 1667 size_t rem = size - i, num = min_t(size_t, rem, 4);
1399 value &= ~SOR_ENABLE;
1400 tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
1401 1668
1402 tegra_dc_commit(dc); 1669 value = tegra_sor_hdmi_subpack(&ptr[i], num);
1403 } 1670 tegra_sor_writel(sor, value, offset++);
1404 1671
1405 err = tegra_sor_power_down(sor); 1672 num = min_t(size_t, rem - num, 3);
1406 if (err < 0) { 1673
1407 dev_err(sor->dev, "failed to power down SOR: %d\n", err); 1674 value = tegra_sor_hdmi_subpack(&ptr[i + 4], num);
1408 goto unlock; 1675 tegra_sor_writel(sor, value, offset++);
1409 } 1676 }
1677}
1410 1678
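Worth spelling out how the loop strides through a packed AVI infoframe:
assuming HDMI_INFOFRAME_SIZE(AVI) is 17 bytes (4-byte generic header plus
13-byte AVI payload, per include/linux/hdmi.h), bytes 0-2 feed the header
register above and the remaining 14 bytes fill exactly two subpacks:

    size_t size = 17, i;    /* HDMI_INFOFRAME_SIZE(AVI) */

    for (i = 3; i < size; i += 7) {
        size_t rem = size - i;
        size_t low = min_t(size_t, rem, 4);        /* ptr[i] .. ptr[i + 3] */
        size_t high = min_t(size_t, rem - low, 3); /* ptr[i + 4] onwards   */

        /* i == 3:  low covers bytes 3..6,   high covers bytes 7..9   */
        /* i == 10: low covers bytes 10..13, high covers bytes 14..16 */
    }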
1411 if (sor->dpaux) { 1679static int
1412 err = tegra_dpaux_disable(sor->dpaux); 1680tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
1413 if (err < 0) { 1681 const struct drm_display_mode *mode)
1414 dev_err(sor->dev, "failed to disable DP: %d\n", err); 1682{
1415 goto unlock; 1683 u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
1416 } 1684 struct hdmi_avi_infoframe frame;
1685 u32 value;
1686 int err;
1687
1688 /* disable AVI infoframe */
1689 value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
1690 value &= ~INFOFRAME_CTRL_SINGLE;
1691 value &= ~INFOFRAME_CTRL_OTHER;
1692 value &= ~INFOFRAME_CTRL_ENABLE;
1693 tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
1694
1695 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
1696 if (err < 0) {
1697 dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
1698 return err;
1417 } 1699 }
1418 1700
1419 err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS); 1701 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1420 if (err < 0) { 1702 if (err < 0) {
1421 dev_err(sor->dev, "failed to power off I/O rail: %d\n", err); 1703 dev_err(sor->dev, "failed to pack AVI infoframe: %d\n", err);
1422 goto unlock; 1704 return err;
1423 } 1705 }
1424 1706
1425 if (output->panel) 1707 tegra_sor_hdmi_write_infopack(sor, buffer, err);
1426 drm_panel_unprepare(output->panel);
1427 1708
1428 clk_disable_unprepare(sor->clk); 1709 /* enable AVI infoframe */
1429 reset_control_assert(sor->rst); 1710 value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
1711 value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
1712 value |= INFOFRAME_CTRL_ENABLE;
1713 tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
1714
1715 return 0;
1716}
1430 1717
1431 sor->enabled = false; 1718static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
1719{
1720 u32 value;
1432 1721
1433unlock: 1722 value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
1434 mutex_unlock(&sor->lock); 1723 value &= ~INFOFRAME_CTRL_ENABLE;
1724 tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
1435} 1725}
1436 1726
1437static int 1727static struct tegra_sor_hdmi_settings *
1438tegra_sor_encoder_atomic_check(struct drm_encoder *encoder, 1728tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
1439 struct drm_crtc_state *crtc_state, 1729{
1440 struct drm_connector_state *conn_state) 1730 unsigned int i;
1731
1732 for (i = 0; i < sor->num_settings; i++)
1733 if (frequency <= sor->settings[i].frequency)
1734 return &sor->settings[i];
1735
1736 return NULL;
1737}
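tegra_sor_hdmi_find_settings() assumes sor->settings is sorted by ascending
frequency: it returns the first entry whose bound covers the requested pixel
clock, or NULL once the clock exceeds every entry, hence the NULL check at
the call site in tegra_sor_hdmi_enable(). A sketch with illustrative bins
(the real bounds live in tegra210_sor_hdmi_defaults):

    /* hypothetical bins: 54, 75, 150, 300, 600 MHz */
    settings = tegra_sor_hdmi_find_settings(sor, 148500000);
    /* -> the 150 MHz entry, the first bin >= 148.5 MHz */

    settings = tegra_sor_hdmi_find_settings(sor, 620000000);
    /* -> NULL: 620 MHz exceeds every bin */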
1738
1739static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
1441{ 1740{
1442 struct tegra_output *output = encoder_to_output(encoder); 1741 struct tegra_output *output = encoder_to_output(encoder);
1443 struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); 1742 struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
1444 unsigned long pclk = crtc_state->mode.clock * 1000;
1445 struct tegra_sor *sor = to_sor(output); 1743 struct tegra_sor *sor = to_sor(output);
1744 u32 value;
1446 int err; 1745 int err;
1447 1746
1448 err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent, 1747 err = tegra_sor_detach(sor);
1449 pclk, 0); 1748 if (err < 0)
1450 if (err < 0) { 1749 dev_err(sor->dev, "failed to detach SOR: %d\n", err);
1451 dev_err(output->dev, "failed to setup CRTC state: %d\n", err); 1750
1452 return err; 1751 tegra_sor_writel(sor, 0, SOR_STATE1);
1752 tegra_sor_update(sor);
1753
1754 /* disable display to SOR clock */
1755 value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
1756 value &= ~SOR1_TIMING_CYA;
1757 value &= ~SOR1_ENABLE;
1758 tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
1759
1760 tegra_dc_commit(dc);
1761
1762 err = tegra_sor_power_down(sor);
1763 if (err < 0)
1764 dev_err(sor->dev, "failed to power down SOR: %d\n", err);
1765
1766 err = tegra_io_rail_power_off(TEGRA_IO_RAIL_HDMI);
1767 if (err < 0)
1768 dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err);
1769
1770 reset_control_assert(sor->rst);
1771 usleep_range(1000, 2000);
1772 clk_disable_unprepare(sor->clk);
1773}
1774
1775static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
1776{
1777 struct tegra_output *output = encoder_to_output(encoder);
1778 unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
1779 struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
1780 unsigned int vbe, vse, hbe, hse, vbs, hbs, div;
1781 struct tegra_sor_hdmi_settings *settings;
1782 struct tegra_sor *sor = to_sor(output);
1783 struct drm_display_mode *mode;
1784 struct drm_display_info *info;
1785 u32 value;
1786 int err;
1787
1788 mode = &encoder->crtc->state->adjusted_mode;
1789 info = &output->connector.display_info;
1790
1791 err = clk_prepare_enable(sor->clk);
1792 if (err < 0)
1793 dev_err(sor->dev, "failed to enable clock: %d\n", err);
1794
1795 usleep_range(1000, 2000);
1796
1797 reset_control_deassert(sor->rst);
1798
1799 err = clk_set_parent(sor->clk, sor->clk_safe);
1800 if (err < 0)
1801 dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
1802
1803 div = clk_get_rate(sor->clk) / 1000000 * 4;
1804
1805 err = tegra_io_rail_power_on(TEGRA_IO_RAIL_HDMI);
1806 if (err < 0)
1807 dev_err(sor->dev, "failed to power on HDMI rail: %d\n", err);
1808
1809 usleep_range(20, 100);
1810
1811 value = tegra_sor_readl(sor, SOR_PLL2);
1812 value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
1813 tegra_sor_writel(sor, value, SOR_PLL2);
1814
1815 usleep_range(20, 100);
1816
1817 value = tegra_sor_readl(sor, SOR_PLL3);
1818 value &= ~SOR_PLL3_PLL_VDD_MODE_3V3;
1819 tegra_sor_writel(sor, value, SOR_PLL3);
1820
1821 value = tegra_sor_readl(sor, SOR_PLL0);
1822 value &= ~SOR_PLL0_VCOPD;
1823 value &= ~SOR_PLL0_PWR;
1824 tegra_sor_writel(sor, value, SOR_PLL0);
1825
1826 value = tegra_sor_readl(sor, SOR_PLL2);
1827 value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
1828 tegra_sor_writel(sor, value, SOR_PLL2);
1829
1830 usleep_range(200, 400);
1831
1832 value = tegra_sor_readl(sor, SOR_PLL2);
1833 value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
1834 value &= ~SOR_PLL2_PORT_POWERDOWN;
1835 tegra_sor_writel(sor, value, SOR_PLL2);
1836
1837 usleep_range(20, 100);
1838
1839 value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
1840 value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
1841 SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2;
1842 tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
1843
1844 while (true) {
1845 value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
1846 if ((value & SOR_LANE_SEQ_CTL_STATE_BUSY) == 0)
1847 break;
1848
1849 usleep_range(250, 1000);
1453 } 1850 }
1454 1851
1455 return 0; 1852 value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
1853 SOR_LANE_SEQ_CTL_POWER_STATE_UP | SOR_LANE_SEQ_CTL_DELAY(5);
1854 tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
1855
1856 while (true) {
1857 value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
1858 if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
1859 break;
1860
1861 usleep_range(250, 1000);
1862 }
1863
1864 value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
1865 value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
1866 value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
1867
1868 if (mode->clock < 340000)
1869 value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70;
1870 else
1871 value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40;
1872
1873 value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
1874 tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
1875
1876 value = tegra_sor_readl(sor, SOR_DP_SPARE0);
1877 value |= SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
1878 value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
1879 value |= SOR_DP_SPARE_SEQ_ENABLE;
1880 tegra_sor_writel(sor, value, SOR_DP_SPARE0);
1881
1882 value = SOR_SEQ_CTL_PU_PC(0) | SOR_SEQ_CTL_PU_PC_ALT(0) |
1883 SOR_SEQ_CTL_PD_PC(8) | SOR_SEQ_CTL_PD_PC_ALT(8);
1884 tegra_sor_writel(sor, value, SOR_SEQ_CTL);
1885
1886 value = SOR_SEQ_INST_DRIVE_PWM_OUT_LO | SOR_SEQ_INST_HALT |
1887 SOR_SEQ_INST_WAIT_VSYNC | SOR_SEQ_INST_WAIT(1);
1888 tegra_sor_writel(sor, value, SOR_SEQ_INST(0));
1889 tegra_sor_writel(sor, value, SOR_SEQ_INST(8));
1890
1891 /* program the reference clock */
1892 value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
1893 tegra_sor_writel(sor, value, SOR_REFCLK);
1894
1895 /* XXX don't hardcode */
1896 value = SOR_XBAR_CTRL_LINK1_XSEL(4, 4) |
1897 SOR_XBAR_CTRL_LINK1_XSEL(3, 3) |
1898 SOR_XBAR_CTRL_LINK1_XSEL(2, 2) |
1899 SOR_XBAR_CTRL_LINK1_XSEL(1, 1) |
1900 SOR_XBAR_CTRL_LINK1_XSEL(0, 0) |
1901 SOR_XBAR_CTRL_LINK0_XSEL(4, 4) |
1902 SOR_XBAR_CTRL_LINK0_XSEL(3, 3) |
1903 SOR_XBAR_CTRL_LINK0_XSEL(2, 0) |
1904 SOR_XBAR_CTRL_LINK0_XSEL(1, 1) |
1905 SOR_XBAR_CTRL_LINK0_XSEL(0, 2);
1906 tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
1907
1908 tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
1909
1910 err = clk_set_parent(sor->clk, sor->clk_parent);
1911 if (err < 0)
1912 dev_err(sor->dev, "failed to set parent clock: %d\n", err);
1913
1914 value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
1915
1916 /* XXX is this the proper check? */
1917 if (mode->clock < 75000)
1918 value |= SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED;
1919
1920 tegra_sor_writel(sor, value, SOR_INPUT_CONTROL);
1921
1922 max_ac = ((mode->htotal - mode->hdisplay) - SOR_REKEY - 18) / 32;
1923
1924 value = SOR_HDMI_CTRL_ENABLE | SOR_HDMI_CTRL_MAX_AC_PACKET(max_ac) |
1925 SOR_HDMI_CTRL_AUDIO_LAYOUT | SOR_HDMI_CTRL_REKEY(SOR_REKEY);
1926 tegra_sor_writel(sor, value, SOR_HDMI_CTRL);
1927
1928 /* H_PULSE2 setup */
1929 pulse_start = h_ref_to_sync + (mode->hsync_end - mode->hsync_start) +
1930 (mode->htotal - mode->hsync_end) - 10;
1931
1932 value = PULSE_LAST_END_A | PULSE_QUAL_VACTIVE |
1933 PULSE_POLARITY_HIGH | PULSE_MODE_NORMAL;
1934 tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
1935
1936 value = PULSE_END(pulse_start + 8) | PULSE_START(pulse_start);
1937 tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
1938
1939 value = tegra_dc_readl(dc, DC_DISP_DISP_SIGNAL_OPTIONS0);
1940 value |= H_PULSE2_ENABLE;
1941 tegra_dc_writel(dc, value, DC_DISP_DISP_SIGNAL_OPTIONS0);
1942
1943 /* infoframe setup */
1944 err = tegra_sor_hdmi_setup_avi_infoframe(sor, mode);
1945 if (err < 0)
1946 dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
1947
1948 /* XXX HDMI audio support not implemented yet */
1949 tegra_sor_hdmi_disable_audio_infoframe(sor);
1950
1951 /* use single TMDS protocol */
1952 value = tegra_sor_readl(sor, SOR_STATE1);
1953 value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
1954 value |= SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A;
1955 tegra_sor_writel(sor, value, SOR_STATE1);
1956
1957 /* power up pad calibration */
1958 value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
1959 value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
1960 tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
1961
1962 /* production settings */
1963 settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000);
 1964 if (!settings) {
 1965 dev_err(sor->dev, "no settings for pixel clock %d Hz\n",
 1966 mode->clock * 1000);
 1967 return;
 1968 }
1969
1970 value = tegra_sor_readl(sor, SOR_PLL0);
1971 value &= ~SOR_PLL0_ICHPMP_MASK;
1972 value &= ~SOR_PLL0_VCOCAP_MASK;
1973 value |= SOR_PLL0_ICHPMP(settings->ichpmp);
1974 value |= SOR_PLL0_VCOCAP(settings->vcocap);
1975 tegra_sor_writel(sor, value, SOR_PLL0);
1976
1977 tegra_sor_dp_term_calibrate(sor);
1978
1979 value = tegra_sor_readl(sor, SOR_PLL1);
1980 value &= ~SOR_PLL1_LOADADJ_MASK;
1981 value |= SOR_PLL1_LOADADJ(settings->loadadj);
1982 tegra_sor_writel(sor, value, SOR_PLL1);
1983
1984 value = tegra_sor_readl(sor, SOR_PLL3);
1985 value &= ~SOR_PLL3_BG_VREF_LEVEL_MASK;
1986 value |= SOR_PLL3_BG_VREF_LEVEL(settings->bg_vref);
1987 tegra_sor_writel(sor, value, SOR_PLL3);
1988
1989 value = settings->drive_current[0] << 24 |
1990 settings->drive_current[1] << 16 |
1991 settings->drive_current[2] << 8 |
1992 settings->drive_current[3] << 0;
1993 tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
1994
1995 value = settings->preemphasis[0] << 24 |
1996 settings->preemphasis[1] << 16 |
1997 settings->preemphasis[2] << 8 |
1998 settings->preemphasis[3] << 0;
1999 tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
2000
2001 value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
2002 value &= ~SOR_DP_PADCTL_TX_PU_MASK;
2003 value |= SOR_DP_PADCTL_TX_PU_ENABLE;
2004 value |= SOR_DP_PADCTL_TX_PU(settings->tx_pu);
2005 tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
2006
2007 /* power down pad calibration */
2008 value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
2009 value |= SOR_DP_PADCTL_PAD_CAL_PD;
2010 tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
2011
2012 /* miscellaneous display controller settings */
2013 value = VSYNC_H_POSITION(1);
2014 tegra_dc_writel(dc, value, DC_DISP_DISP_TIMING_OPTIONS);
2015
2016 value = tegra_dc_readl(dc, DC_DISP_DISP_COLOR_CONTROL);
2017 value &= ~DITHER_CONTROL_MASK;
2018 value &= ~BASE_COLOR_SIZE_MASK;
2019
2020 switch (info->bpc) {
2021 case 6:
2022 value |= BASE_COLOR_SIZE_666;
2023 break;
2024
2025 case 8:
2026 value |= BASE_COLOR_SIZE_888;
2027 break;
2028
2029 default:
2030 WARN(1, "%u bits-per-color not supported\n", info->bpc);
2031 break;
2032 }
2033
2034 tegra_dc_writel(dc, value, DC_DISP_DISP_COLOR_CONTROL);
2035
2036 err = tegra_sor_power_up(sor, 250);
2037 if (err < 0)
2038 dev_err(sor->dev, "failed to power up SOR: %d\n", err);
2039
2040 /* configure mode */
2041 value = tegra_sor_readl(sor, SOR_STATE1);
2042 value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
2043 value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
2044 value &= ~SOR_STATE_ASY_OWNER_MASK;
2045
2046 value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
2047 SOR_STATE_ASY_OWNER(dc->pipe + 1);
2048
2049 if (mode->flags & DRM_MODE_FLAG_PHSYNC)
2050 value &= ~SOR_STATE_ASY_HSYNCPOL;
2051
2052 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
2053 value |= SOR_STATE_ASY_HSYNCPOL;
2054
2055 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
2056 value &= ~SOR_STATE_ASY_VSYNCPOL;
2057
2058 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
2059 value |= SOR_STATE_ASY_VSYNCPOL;
2060
2061 switch (info->bpc) {
2062 case 8:
2063 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
2064 break;
2065
2066 case 6:
2067 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
2068 break;
2069
2070 default:
2071 BUG();
2072 break;
2073 }
2074
2075 tegra_sor_writel(sor, value, SOR_STATE1);
2076
2077 value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
2078 value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
2079 value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
2080 tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
2081
2082 value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
2083 value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
2084 value |= SOR_HEAD_STATE_COLORSPACE_RGB;
2085 tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
2086
2087 /*
2088 * TODO: The video timing programming below doesn't seem to match the
2089 * register definitions.
2090 */
2091
2092 value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
2093 tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
2094
2095 /* sync end = sync width - 1 */
2096 vse = mode->vsync_end - mode->vsync_start - 1;
2097 hse = mode->hsync_end - mode->hsync_start - 1;
2098
2099 value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
2100 tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
2101
2102 /* blank end = sync end + back porch */
2103 vbe = vse + (mode->vtotal - mode->vsync_end);
2104 hbe = hse + (mode->htotal - mode->hsync_end);
2105
2106 value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
2107 tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
2108
2109 /* blank start = blank end + active */
2110 vbs = vbe + mode->vdisplay;
2111 hbs = hbe + mode->hdisplay;
2112
2113 value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
2114 tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
2115
2116 tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
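The head state words chain off one another (sync width, then back porch,
then active area), so one set of concrete numbers makes the encoding clear.
Assuming the same illustrative 1080p timings as in the calc_h_ref_to_sync()
sketch, plus vsync_start = 1084, vsync_end = 1089, vtotal = 1125:

    hse = 2052 - 2008 - 1;      /*   43: hsync width - 1         */
    vse = 1089 - 1084 - 1;      /*    4: vsync width - 1         */
    hbe = hse + (2200 - 2052);  /*  191: + horizontal back porch */
    vbe = vse + (1125 - 1089);  /*   40: + vertical back porch   */
    hbs = hbe + 1920;           /* 2111: + active width          */
    vbs = vbe + 1080;           /* 1120: + active height         */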
2117
2118 tegra_sor_update(sor);
2119
2120 err = tegra_sor_attach(sor);
2121 if (err < 0)
2122 dev_err(sor->dev, "failed to attach SOR: %d\n", err);
2123
2124 /* enable display to SOR clock and generate HDMI preamble */
2125 value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
2126 value |= SOR1_ENABLE | SOR1_TIMING_CYA;
2127 tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
2128
2129 tegra_dc_commit(dc);
2130
2131 err = tegra_sor_wakeup(sor);
2132 if (err < 0)
2133 dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
1456} 2134}
1457 2135
1458static const struct drm_encoder_helper_funcs tegra_sor_encoder_helper_funcs = { 2136static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
1459 .dpms = tegra_sor_encoder_dpms, 2137 .disable = tegra_sor_hdmi_disable,
1460 .prepare = tegra_sor_encoder_prepare, 2138 .enable = tegra_sor_hdmi_enable,
1461 .commit = tegra_sor_encoder_commit,
1462 .mode_set = tegra_sor_encoder_mode_set,
1463 .disable = tegra_sor_encoder_disable,
1464 .atomic_check = tegra_sor_encoder_atomic_check, 2139 .atomic_check = tegra_sor_encoder_atomic_check,
1465}; 2140};
1466 2141
1467static int tegra_sor_init(struct host1x_client *client) 2142static int tegra_sor_init(struct host1x_client *client)
1468{ 2143{
1469 struct drm_device *drm = dev_get_drvdata(client->parent); 2144 struct drm_device *drm = dev_get_drvdata(client->parent);
2145 const struct drm_encoder_helper_funcs *helpers = NULL;
1470 struct tegra_sor *sor = host1x_client_to_sor(client); 2146 struct tegra_sor *sor = host1x_client_to_sor(client);
2147 int connector = DRM_MODE_CONNECTOR_Unknown;
2148 int encoder = DRM_MODE_ENCODER_NONE;
1471 int err; 2149 int err;
1472 2150
1473 if (!sor->dpaux) 2151 if (!sor->dpaux) {
1474 return -ENODEV; 2152 if (sor->soc->supports_hdmi) {
2153 connector = DRM_MODE_CONNECTOR_HDMIA;
2154 encoder = DRM_MODE_ENCODER_TMDS;
2155 helpers = &tegra_sor_hdmi_helpers;
2156 } else if (sor->soc->supports_lvds) {
2157 connector = DRM_MODE_CONNECTOR_LVDS;
2158 encoder = DRM_MODE_ENCODER_LVDS;
2159 }
2160 } else {
2161 if (sor->soc->supports_edp) {
2162 connector = DRM_MODE_CONNECTOR_eDP;
2163 encoder = DRM_MODE_ENCODER_TMDS;
2164 helpers = &tegra_sor_edp_helpers;
2165 } else if (sor->soc->supports_dp) {
2166 connector = DRM_MODE_CONNECTOR_DisplayPort;
2167 encoder = DRM_MODE_ENCODER_TMDS;
2168 }
2169 }
1475 2170
1476 sor->output.dev = sor->dev; 2171 sor->output.dev = sor->dev;
1477 2172
1478 drm_connector_init(drm, &sor->output.connector, 2173 drm_connector_init(drm, &sor->output.connector,
1479 &tegra_sor_connector_funcs, 2174 &tegra_sor_connector_funcs,
1480 DRM_MODE_CONNECTOR_eDP); 2175 connector);
1481 drm_connector_helper_add(&sor->output.connector, 2176 drm_connector_helper_add(&sor->output.connector,
1482 &tegra_sor_connector_helper_funcs); 2177 &tegra_sor_connector_helper_funcs);
1483 sor->output.connector.dpms = DRM_MODE_DPMS_OFF; 2178 sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
1484 2179
1485 drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs, 2180 drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs,
1486 DRM_MODE_ENCODER_TMDS); 2181 encoder);
1487 drm_encoder_helper_add(&sor->output.encoder, 2182 drm_encoder_helper_add(&sor->output.encoder, helpers);
1488 &tegra_sor_encoder_helper_funcs);
1489 2183
1490 drm_mode_connector_attach_encoder(&sor->output.connector, 2184 drm_mode_connector_attach_encoder(&sor->output.connector,
1491 &sor->output.encoder); 2185 &sor->output.encoder);
@@ -1578,18 +2272,130 @@ static const struct host1x_client_ops sor_client_ops = {
1578 .exit = tegra_sor_exit, 2272 .exit = tegra_sor_exit,
1579}; 2273};
1580 2274
2275static const struct tegra_sor_ops tegra_sor_edp_ops = {
2276 .name = "eDP",
2277};
2278
2279static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
2280{
2281 int err;
2282
2283 sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
2284 if (IS_ERR(sor->avdd_io_supply)) {
2285 dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
2286 PTR_ERR(sor->avdd_io_supply));
2287 return PTR_ERR(sor->avdd_io_supply);
2288 }
2289
2290 err = regulator_enable(sor->avdd_io_supply);
2291 if (err < 0) {
2292 dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
2293 err);
2294 return err;
2295 }
2296
2297 sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
2298 if (IS_ERR(sor->vdd_pll_supply)) {
2299 dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
2300 PTR_ERR(sor->vdd_pll_supply));
2301 return PTR_ERR(sor->vdd_pll_supply);
2302 }
2303
2304 err = regulator_enable(sor->vdd_pll_supply);
2305 if (err < 0) {
2306 dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
2307 err);
2308 return err;
2309 }
2310
2311 sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
2312 if (IS_ERR(sor->hdmi_supply)) {
2313 dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
2314 PTR_ERR(sor->hdmi_supply));
2315 return PTR_ERR(sor->hdmi_supply);
2316 }
2317
2318 err = regulator_enable(sor->hdmi_supply);
2319 if (err < 0) {
2320 dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
2321 return err;
2322 }
2323
2324 return 0;
2325}
2326
2327static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
2328{
2329 regulator_disable(sor->hdmi_supply);
2330 regulator_disable(sor->vdd_pll_supply);
2331 regulator_disable(sor->avdd_io_supply);
2332
2333 return 0;
2334}
2335
2336static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
2337 .name = "HDMI",
2338 .probe = tegra_sor_hdmi_probe,
2339 .remove = tegra_sor_hdmi_remove,
2340};
2341
2342static const struct tegra_sor_soc tegra124_sor = {
2343 .supports_edp = true,
2344 .supports_lvds = true,
2345 .supports_hdmi = false,
2346 .supports_dp = false,
2347};
2348
2349static const struct tegra_sor_soc tegra210_sor = {
2350 .supports_edp = true,
2351 .supports_lvds = false,
2352 .supports_hdmi = false,
2353 .supports_dp = false,
2354};
2355
2356static const struct tegra_sor_soc tegra210_sor1 = {
2357 .supports_edp = false,
2358 .supports_lvds = false,
2359 .supports_hdmi = true,
2360 .supports_dp = true,
2361
2362 .num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
2363 .settings = tegra210_sor_hdmi_defaults,
2364};
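The HDMI path above reads eight fields out of each settings entry, which
pins down the shape of struct tegra_sor_hdmi_settings. A reconstruction from
those uses only (types, field order and any members this patch does not
touch are guesses):

    struct tegra_sor_hdmi_settings {
        unsigned long frequency;    /* upper pixel-clock bound for the entry */

        u8 vcocap;                  /* SOR_PLL0_VCOCAP()        */
        u8 ichpmp;                  /* SOR_PLL0_ICHPMP()        */
        u8 loadadj;                 /* SOR_PLL1_LOADADJ()       */
        u8 bg_vref;                 /* SOR_PLL3_BG_VREF_LEVEL() */
        u8 tx_pu;                   /* SOR_DP_PADCTL_TX_PU()    */

        u8 drive_current[4];        /* packed into SOR_LANE_DRIVE_CURRENT0 */
        u8 preemphasis[4];          /* packed into SOR_LANE_PREEMPHASIS0   */
    };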
2365
2366static const struct of_device_id tegra_sor_of_match[] = {
2367 { .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
2368 { .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor },
2369 { .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor },
2370 { },
2371};
2372MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
2373
1581static int tegra_sor_probe(struct platform_device *pdev) 2374static int tegra_sor_probe(struct platform_device *pdev)
1582{ 2375{
2376 const struct of_device_id *match;
1583 struct device_node *np; 2377 struct device_node *np;
1584 struct tegra_sor *sor; 2378 struct tegra_sor *sor;
1585 struct resource *regs; 2379 struct resource *regs;
1586 int err; 2380 int err;
1587 2381
2382 match = of_match_device(tegra_sor_of_match, &pdev->dev);
2383
1588 sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL); 2384 sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL);
1589 if (!sor) 2385 if (!sor)
1590 return -ENOMEM; 2386 return -ENOMEM;
1591 2387
1592 sor->output.dev = sor->dev = &pdev->dev; 2388 sor->output.dev = sor->dev = &pdev->dev;
2389 sor->soc = match->data;
2390
2391 sor->settings = devm_kmemdup(&pdev->dev, sor->soc->settings,
2392 sor->soc->num_settings *
2393 sizeof(*sor->settings),
2394 GFP_KERNEL);
2395 if (!sor->settings)
2396 return -ENOMEM;
2397
2398 sor->num_settings = sor->soc->num_settings;
1593 2399
1594 np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0); 2400 np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0);
1595 if (np) { 2401 if (np) {
@@ -1600,51 +2406,106 @@ static int tegra_sor_probe(struct platform_device *pdev)
1600 return -EPROBE_DEFER; 2406 return -EPROBE_DEFER;
1601 } 2407 }
1602 2408
2409 if (!sor->dpaux) {
2410 if (sor->soc->supports_hdmi) {
2411 sor->ops = &tegra_sor_hdmi_ops;
2412 } else if (sor->soc->supports_lvds) {
2413 dev_err(&pdev->dev, "LVDS not supported yet\n");
2414 return -ENODEV;
2415 } else {
2416 dev_err(&pdev->dev, "unknown (non-DP) support\n");
2417 return -ENODEV;
2418 }
2419 } else {
2420 if (sor->soc->supports_edp) {
2421 sor->ops = &tegra_sor_edp_ops;
2422 } else if (sor->soc->supports_dp) {
2423 dev_err(&pdev->dev, "DisplayPort not supported yet\n");
2424 return -ENODEV;
2425 } else {
2426 dev_err(&pdev->dev, "unknown (DP) support\n");
2427 return -ENODEV;
2428 }
2429 }
2430
1603 err = tegra_output_probe(&sor->output); 2431 err = tegra_output_probe(&sor->output);
1604 if (err < 0) 2432 if (err < 0) {
2433 dev_err(&pdev->dev, "failed to probe output: %d\n", err);
1605 return err; 2434 return err;
2435 }
2436
2437 if (sor->ops && sor->ops->probe) {
2438 err = sor->ops->probe(sor);
2439 if (err < 0) {
2440 dev_err(&pdev->dev, "failed to probe %s: %d\n",
2441 sor->ops->name, err);
2442 goto output;
2443 }
2444 }
1606 2445
1607 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2446 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1608 sor->regs = devm_ioremap_resource(&pdev->dev, regs); 2447 sor->regs = devm_ioremap_resource(&pdev->dev, regs);
1609 if (IS_ERR(sor->regs)) 2448 if (IS_ERR(sor->regs)) {
1610 return PTR_ERR(sor->regs); 2449 err = PTR_ERR(sor->regs);
2450 goto remove;
2451 }
1611 2452
1612 sor->rst = devm_reset_control_get(&pdev->dev, "sor"); 2453 sor->rst = devm_reset_control_get(&pdev->dev, "sor");
1613 if (IS_ERR(sor->rst)) 2454 if (IS_ERR(sor->rst)) {
1614 return PTR_ERR(sor->rst); 2455 err = PTR_ERR(sor->rst);
2456 dev_err(&pdev->dev, "failed to get reset control: %d\n", err);
2457 goto remove;
2458 }
1615 2459
1616 sor->clk = devm_clk_get(&pdev->dev, NULL); 2460 sor->clk = devm_clk_get(&pdev->dev, NULL);
1617 if (IS_ERR(sor->clk)) 2461 if (IS_ERR(sor->clk)) {
1618 return PTR_ERR(sor->clk); 2462 err = PTR_ERR(sor->clk);
2463 dev_err(&pdev->dev, "failed to get module clock: %d\n", err);
2464 goto remove;
2465 }
1619 2466
1620 sor->clk_parent = devm_clk_get(&pdev->dev, "parent"); 2467 sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
1621 if (IS_ERR(sor->clk_parent)) 2468 if (IS_ERR(sor->clk_parent)) {
1622 return PTR_ERR(sor->clk_parent); 2469 err = PTR_ERR(sor->clk_parent);
2470 dev_err(&pdev->dev, "failed to get parent clock: %d\n", err);
2471 goto remove;
2472 }
1623 2473
1624 sor->clk_safe = devm_clk_get(&pdev->dev, "safe"); 2474 sor->clk_safe = devm_clk_get(&pdev->dev, "safe");
1625 if (IS_ERR(sor->clk_safe)) 2475 if (IS_ERR(sor->clk_safe)) {
1626 return PTR_ERR(sor->clk_safe); 2476 err = PTR_ERR(sor->clk_safe);
2477 dev_err(&pdev->dev, "failed to get safe clock: %d\n", err);
2478 goto remove;
2479 }
1627 2480
1628 sor->clk_dp = devm_clk_get(&pdev->dev, "dp"); 2481 sor->clk_dp = devm_clk_get(&pdev->dev, "dp");
1629 if (IS_ERR(sor->clk_dp)) 2482 if (IS_ERR(sor->clk_dp)) {
1630 return PTR_ERR(sor->clk_dp); 2483 err = PTR_ERR(sor->clk_dp);
2484 dev_err(&pdev->dev, "failed to get DP clock: %d\n", err);
2485 goto remove;
2486 }
1631 2487
1632 INIT_LIST_HEAD(&sor->client.list); 2488 INIT_LIST_HEAD(&sor->client.list);
1633 sor->client.ops = &sor_client_ops; 2489 sor->client.ops = &sor_client_ops;
1634 sor->client.dev = &pdev->dev; 2490 sor->client.dev = &pdev->dev;
1635 2491
1636 mutex_init(&sor->lock);
1637
1638 err = host1x_client_register(&sor->client); 2492 err = host1x_client_register(&sor->client);
1639 if (err < 0) { 2493 if (err < 0) {
1640 dev_err(&pdev->dev, "failed to register host1x client: %d\n", 2494 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1641 err); 2495 err);
1642 return err; 2496 goto remove;
1643 } 2497 }
1644 2498
1645 platform_set_drvdata(pdev, sor); 2499 platform_set_drvdata(pdev, sor);
1646 2500
1647 return 0; 2501 return 0;
2502
2503remove:
2504 if (sor->ops && sor->ops->remove)
2505 sor->ops->remove(sor);
2506output:
2507 tegra_output_remove(&sor->output);
2508 return err;
1648} 2509}
1649 2510
1650static int tegra_sor_remove(struct platform_device *pdev) 2511static int tegra_sor_remove(struct platform_device *pdev)
@@ -1659,17 +2520,17 @@ static int tegra_sor_remove(struct platform_device *pdev)
1659 return err; 2520 return err;
1660 } 2521 }
1661 2522
2523 if (sor->ops && sor->ops->remove) {
2524 err = sor->ops->remove(sor);
2525 if (err < 0)
2526 dev_err(&pdev->dev, "failed to remove SOR: %d\n", err);
2527 }
2528
1662 tegra_output_remove(&sor->output); 2529 tegra_output_remove(&sor->output);
1663 2530
1664 return 0; 2531 return 0;
1665} 2532}
1666 2533
1667static const struct of_device_id tegra_sor_of_match[] = {
1668 { .compatible = "nvidia,tegra124-sor", },
1669 { },
1670};
1671MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
1672
1673struct platform_driver tegra_sor_driver = { 2534struct platform_driver tegra_sor_driver = {
1674 .driver = { 2535 .driver = {
1675 .name = "tegra-sor", 2536 .name = "tegra-sor",
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index a5f8853fedb5..2d31d027e3f6 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -11,9 +11,9 @@
11 11
12#define SOR_CTXSW 0x00 12#define SOR_CTXSW 0x00
13 13
14#define SOR_SUPER_STATE_0 0x01 14#define SOR_SUPER_STATE0 0x01
15 15
16#define SOR_SUPER_STATE_1 0x02 16#define SOR_SUPER_STATE1 0x02
17#define SOR_SUPER_STATE_ATTACHED (1 << 3) 17#define SOR_SUPER_STATE_ATTACHED (1 << 3)
18#define SOR_SUPER_STATE_MODE_NORMAL (1 << 2) 18#define SOR_SUPER_STATE_MODE_NORMAL (1 << 2)
19#define SOR_SUPER_STATE_HEAD_MODE_MASK (3 << 0) 19#define SOR_SUPER_STATE_HEAD_MODE_MASK (3 << 0)
@@ -21,9 +21,9 @@
21#define SOR_SUPER_STATE_HEAD_MODE_SNOOZE (1 << 0) 21#define SOR_SUPER_STATE_HEAD_MODE_SNOOZE (1 << 0)
22#define SOR_SUPER_STATE_HEAD_MODE_SLEEP (0 << 0) 22#define SOR_SUPER_STATE_HEAD_MODE_SLEEP (0 << 0)
23 23
24#define SOR_STATE_0 0x03 24#define SOR_STATE0 0x03
25 25
26#define SOR_STATE_1 0x04 26#define SOR_STATE1 0x04
27#define SOR_STATE_ASY_PIXELDEPTH_MASK (0xf << 17) 27#define SOR_STATE_ASY_PIXELDEPTH_MASK (0xf << 17)
28#define SOR_STATE_ASY_PIXELDEPTH_BPP_18_444 (0x2 << 17) 28#define SOR_STATE_ASY_PIXELDEPTH_BPP_18_444 (0x2 << 17)
29#define SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 (0x5 << 17) 29#define SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 (0x5 << 17)
@@ -33,19 +33,27 @@
33#define SOR_STATE_ASY_PROTOCOL_CUSTOM (0xf << 8) 33#define SOR_STATE_ASY_PROTOCOL_CUSTOM (0xf << 8)
34#define SOR_STATE_ASY_PROTOCOL_DP_A (0x8 << 8) 34#define SOR_STATE_ASY_PROTOCOL_DP_A (0x8 << 8)
35#define SOR_STATE_ASY_PROTOCOL_DP_B (0x9 << 8) 35#define SOR_STATE_ASY_PROTOCOL_DP_B (0x9 << 8)
36#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (0x1 << 8)
36#define SOR_STATE_ASY_PROTOCOL_LVDS (0x0 << 8) 37#define SOR_STATE_ASY_PROTOCOL_LVDS (0x0 << 8)
37#define SOR_STATE_ASY_CRC_MODE_MASK (0x3 << 6) 38#define SOR_STATE_ASY_CRC_MODE_MASK (0x3 << 6)
38#define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6) 39#define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6)
39#define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6) 40#define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6)
40#define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6) 41#define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6)
42#define SOR_STATE_ASY_OWNER_MASK 0xf
41#define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0) 43#define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0)
42 44
43#define SOR_HEAD_STATE_0(x) (0x05 + (x)) 45#define SOR_HEAD_STATE0(x) (0x05 + (x))
44#define SOR_HEAD_STATE_1(x) (0x07 + (x)) 46#define SOR_HEAD_STATE_RANGECOMPRESS_MASK (0x1 << 3)
45#define SOR_HEAD_STATE_2(x) (0x09 + (x)) 47#define SOR_HEAD_STATE_DYNRANGE_MASK (0x1 << 2)
46#define SOR_HEAD_STATE_3(x) (0x0b + (x)) 48#define SOR_HEAD_STATE_DYNRANGE_VESA (0 << 2)
47#define SOR_HEAD_STATE_4(x) (0x0d + (x)) 49#define SOR_HEAD_STATE_DYNRANGE_CEA (1 << 2)
48#define SOR_HEAD_STATE_5(x) (0x0f + (x)) 50#define SOR_HEAD_STATE_COLORSPACE_MASK (0x3 << 0)
51#define SOR_HEAD_STATE_COLORSPACE_RGB (0 << 0)
52#define SOR_HEAD_STATE1(x) (0x07 + (x))
53#define SOR_HEAD_STATE2(x) (0x09 + (x))
54#define SOR_HEAD_STATE3(x) (0x0b + (x))
55#define SOR_HEAD_STATE4(x) (0x0d + (x))
56#define SOR_HEAD_STATE5(x) (0x0f + (x))
49#define SOR_CRC_CNTRL 0x11 57#define SOR_CRC_CNTRL 0x11
50#define SOR_CRC_CNTRL_ENABLE (1 << 0) 58#define SOR_CRC_CNTRL_ENABLE (1 << 0)
51#define SOR_DP_DEBUG_MVID 0x12 59#define SOR_DP_DEBUG_MVID 0x12
@@ -75,62 +83,101 @@
75#define SOR_TEST_HEAD_MODE_MASK (3 << 8) 83#define SOR_TEST_HEAD_MODE_MASK (3 << 8)
76#define SOR_TEST_HEAD_MODE_AWAKE (2 << 8) 84#define SOR_TEST_HEAD_MODE_AWAKE (2 << 8)
77 85
78#define SOR_PLL_0 0x17 86#define SOR_PLL0 0x17
79#define SOR_PLL_0_ICHPMP_MASK (0xf << 24) 87#define SOR_PLL0_ICHPMP_MASK (0xf << 24)
80#define SOR_PLL_0_ICHPMP(x) (((x) & 0xf) << 24) 88#define SOR_PLL0_ICHPMP(x) (((x) & 0xf) << 24)
81#define SOR_PLL_0_VCOCAP_MASK (0xf << 8) 89#define SOR_PLL0_VCOCAP_MASK (0xf << 8)
82#define SOR_PLL_0_VCOCAP(x) (((x) & 0xf) << 8) 90#define SOR_PLL0_VCOCAP(x) (((x) & 0xf) << 8)
83#define SOR_PLL_0_VCOCAP_RST SOR_PLL_0_VCOCAP(3) 91#define SOR_PLL0_VCOCAP_RST SOR_PLL0_VCOCAP(3)
84#define SOR_PLL_0_PLLREG_MASK (0x3 << 6) 92#define SOR_PLL0_PLLREG_MASK (0x3 << 6)
85#define SOR_PLL_0_PLLREG_LEVEL(x) (((x) & 0x3) << 6) 93#define SOR_PLL0_PLLREG_LEVEL(x) (((x) & 0x3) << 6)
86#define SOR_PLL_0_PLLREG_LEVEL_V25 SOR_PLL_0_PLLREG_LEVEL(0) 94#define SOR_PLL0_PLLREG_LEVEL_V25 SOR_PLL0_PLLREG_LEVEL(0)
87#define SOR_PLL_0_PLLREG_LEVEL_V15 SOR_PLL_0_PLLREG_LEVEL(1) 95#define SOR_PLL0_PLLREG_LEVEL_V15 SOR_PLL0_PLLREG_LEVEL(1)
88#define SOR_PLL_0_PLLREG_LEVEL_V35 SOR_PLL_0_PLLREG_LEVEL(2) 96#define SOR_PLL0_PLLREG_LEVEL_V35 SOR_PLL0_PLLREG_LEVEL(2)
89#define SOR_PLL_0_PLLREG_LEVEL_V45 SOR_PLL_0_PLLREG_LEVEL(3) 97#define SOR_PLL0_PLLREG_LEVEL_V45 SOR_PLL0_PLLREG_LEVEL(3)
90#define SOR_PLL_0_PULLDOWN (1 << 5) 98#define SOR_PLL0_PULLDOWN (1 << 5)
91#define SOR_PLL_0_RESISTOR_EXT (1 << 4) 99#define SOR_PLL0_RESISTOR_EXT (1 << 4)
92#define SOR_PLL_0_VCOPD (1 << 2) 100#define SOR_PLL0_VCOPD (1 << 2)
93#define SOR_PLL_0_POWER_OFF (1 << 0) 101#define SOR_PLL0_PWR (1 << 0)
94 102
95#define SOR_PLL_1 0x18 103#define SOR_PLL1 0x18
96/* XXX: read-only bit? */ 104/* XXX: read-only bit? */
97#define SOR_PLL_1_TERM_COMPOUT (1 << 15) 105#define SOR_PLL1_LOADADJ_MASK (0xf << 20)
98#define SOR_PLL_1_TMDS_TERM (1 << 8) 106#define SOR_PLL1_LOADADJ(x) (((x) & 0xf) << 20)
99 107#define SOR_PLL1_TERM_COMPOUT (1 << 15)
100#define SOR_PLL_2 0x19 108#define SOR_PLL1_TMDS_TERMADJ_MASK (0xf << 9)
101#define SOR_PLL_2_LVDS_ENABLE (1 << 25) 109#define SOR_PLL1_TMDS_TERMADJ(x) (((x) & 0xf) << 9)
102#define SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE (1 << 24) 110#define SOR_PLL1_TMDS_TERM (1 << 8)
103#define SOR_PLL_2_PORT_POWERDOWN (1 << 23) 111
104#define SOR_PLL_2_BANDGAP_POWERDOWN (1 << 22) 112#define SOR_PLL2 0x19
105#define SOR_PLL_2_POWERDOWN_OVERRIDE (1 << 18) 113#define SOR_PLL2_LVDS_ENABLE (1 << 25)
106#define SOR_PLL_2_SEQ_PLLCAPPD (1 << 17) 114#define SOR_PLL2_SEQ_PLLCAPPD_ENFORCE (1 << 24)
107 115#define SOR_PLL2_PORT_POWERDOWN (1 << 23)
108#define SOR_PLL_3 0x1a 116#define SOR_PLL2_BANDGAP_POWERDOWN (1 << 22)
109#define SOR_PLL_3_PLL_VDD_MODE_V1_8 (0 << 13) 117#define SOR_PLL2_POWERDOWN_OVERRIDE (1 << 18)
110#define SOR_PLL_3_PLL_VDD_MODE_V3_3 (1 << 13) 118#define SOR_PLL2_SEQ_PLLCAPPD (1 << 17)
119#define SOR_PLL2_SEQ_PLL_PULLDOWN (1 << 16)
120
121#define SOR_PLL3 0x1a
122#define SOR_PLL3_BG_VREF_LEVEL_MASK (0xf << 24)
123#define SOR_PLL3_BG_VREF_LEVEL(x) (((x) & 0xf) << 24)
124#define SOR_PLL3_PLL_VDD_MODE_1V8 (0 << 13)
125#define SOR_PLL3_PLL_VDD_MODE_3V3 (1 << 13)
111 126
112#define SOR_CSTM 0x1b 127#define SOR_CSTM 0x1b
128#define SOR_CSTM_ROTCLK_MASK (0xf << 24)
129#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
113#define SOR_CSTM_LVDS (1 << 16) 130#define SOR_CSTM_LVDS (1 << 16)
114#define SOR_CSTM_LINK_ACT_B (1 << 15) 131#define SOR_CSTM_LINK_ACT_B (1 << 15)
115#define SOR_CSTM_LINK_ACT_A (1 << 14) 132#define SOR_CSTM_LINK_ACT_A (1 << 14)
116#define SOR_CSTM_UPPER (1 << 11) 133#define SOR_CSTM_UPPER (1 << 11)
117 134
118#define SOR_LVDS 0x1c 135#define SOR_LVDS 0x1c
119#define SOR_CRC_A 0x1d 136#define SOR_CRCA 0x1d
120#define SOR_CRC_A_VALID (1 << 0) 137#define SOR_CRCA_VALID (1 << 0)
121#define SOR_CRC_A_RESET (1 << 0) 138#define SOR_CRCA_RESET (1 << 0)
122#define SOR_CRC_B 0x1e 139#define SOR_CRCB 0x1e
123#define SOR_BLANK 0x1f 140#define SOR_BLANK 0x1f
124#define SOR_SEQ_CTL 0x20 141#define SOR_SEQ_CTL 0x20
142#define SOR_SEQ_CTL_PD_PC_ALT(x) (((x) & 0xf) << 12)
143#define SOR_SEQ_CTL_PD_PC(x) (((x) & 0xf) << 8)
144#define SOR_SEQ_CTL_PU_PC_ALT(x) (((x) & 0xf) << 4)
145#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0)
125 146
126#define SOR_LANE_SEQ_CTL 0x21 147#define SOR_LANE_SEQ_CTL 0x21
127#define SOR_LANE_SEQ_CTL_TRIGGER (1 << 31) 148#define SOR_LANE_SEQ_CTL_TRIGGER (1 << 31)
149#define SOR_LANE_SEQ_CTL_STATE_BUSY (1 << 28)
128#define SOR_LANE_SEQ_CTL_SEQUENCE_UP (0 << 20) 150#define SOR_LANE_SEQ_CTL_SEQUENCE_UP (0 << 20)
129#define SOR_LANE_SEQ_CTL_SEQUENCE_DOWN (1 << 20) 151#define SOR_LANE_SEQ_CTL_SEQUENCE_DOWN (1 << 20)
130#define SOR_LANE_SEQ_CTL_POWER_STATE_UP (0 << 16) 152#define SOR_LANE_SEQ_CTL_POWER_STATE_UP (0 << 16)
131#define SOR_LANE_SEQ_CTL_POWER_STATE_DOWN (1 << 16) 153#define SOR_LANE_SEQ_CTL_POWER_STATE_DOWN (1 << 16)
154#define SOR_LANE_SEQ_CTL_DELAY(x) (((x) & 0xf) << 12)
132 155
133#define SOR_SEQ_INST(x) (0x22 + (x)) 156#define SOR_SEQ_INST(x) (0x22 + (x))
157#define SOR_SEQ_INST_PLL_PULLDOWN (1 << 31)
158#define SOR_SEQ_INST_POWERDOWN_MACRO (1 << 30)
159#define SOR_SEQ_INST_ASSERT_PLL_RESET (1 << 29)
160#define SOR_SEQ_INST_BLANK_V (1 << 28)
161#define SOR_SEQ_INST_BLANK_H (1 << 27)
162#define SOR_SEQ_INST_BLANK_DE (1 << 26)
163#define SOR_SEQ_INST_BLACK_DATA (1 << 25)
164#define SOR_SEQ_INST_TRISTATE_IOS (1 << 24)
165#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
166#define SOR_SEQ_INST_PIN_B_LOW (0 << 22)
167#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
168#define SOR_SEQ_INST_PIN_A_LOW (0 << 21)
169#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
170#define SOR_SEQ_INST_SEQUENCE_UP (0 << 19)
171#define SOR_SEQ_INST_SEQUENCE_DOWN (1 << 19)
172#define SOR_SEQ_INST_LANE_SEQ_STOP (0 << 18)
173#define SOR_SEQ_INST_LANE_SEQ_RUN (1 << 18)
174#define SOR_SEQ_INST_PORT_POWERDOWN (1 << 17)
175#define SOR_SEQ_INST_PLL_POWERDOWN (1 << 16)
176#define SOR_SEQ_INST_HALT (1 << 15)
177#define SOR_SEQ_INST_WAIT_US (0 << 12)
178#define SOR_SEQ_INST_WAIT_MS (1 << 12)
179#define SOR_SEQ_INST_WAIT_VSYNC (2 << 12)
180#define SOR_SEQ_INST_WAIT(x) (((x) & 0x3ff) << 0)
134 181
135#define SOR_PWM_DIV 0x32 182#define SOR_PWM_DIV 0x32
136#define SOR_PWM_DIV_MASK 0xffffff 183#define SOR_PWM_DIV_MASK 0xffffff
@@ -140,32 +187,36 @@
140#define SOR_PWM_CTL_CLK_SEL (1 << 30) 187#define SOR_PWM_CTL_CLK_SEL (1 << 30)
141#define SOR_PWM_CTL_DUTY_CYCLE_MASK 0xffffff 188#define SOR_PWM_CTL_DUTY_CYCLE_MASK 0xffffff
142 189
143#define SOR_VCRC_A_0 0x34 190#define SOR_VCRC_A0 0x34
144#define SOR_VCRC_A_1 0x35 191#define SOR_VCRC_A1 0x35
145#define SOR_VCRC_B_0 0x36 192#define SOR_VCRC_B0 0x36
146#define SOR_VCRC_B_1 0x37 193#define SOR_VCRC_B1 0x37
147#define SOR_CCRC_A_0 0x38 194#define SOR_CCRC_A0 0x38
148#define SOR_CCRC_A_1 0x39 195#define SOR_CCRC_A1 0x39
149#define SOR_CCRC_B_0 0x3a 196#define SOR_CCRC_B0 0x3a
150#define SOR_CCRC_B_1 0x3b 197#define SOR_CCRC_B1 0x3b
151#define SOR_EDATA_A_0 0x3c 198#define SOR_EDATA_A0 0x3c
152#define SOR_EDATA_A_1 0x3d 199#define SOR_EDATA_A1 0x3d
153#define SOR_EDATA_B_0 0x3e 200#define SOR_EDATA_B0 0x3e
154#define SOR_EDATA_B_1 0x3f 201#define SOR_EDATA_B1 0x3f
155#define SOR_COUNT_A_0 0x40 202#define SOR_COUNT_A0 0x40
156#define SOR_COUNT_A_1 0x41 203#define SOR_COUNT_A1 0x41
157#define SOR_COUNT_B_0 0x42 204#define SOR_COUNT_B0 0x42
158#define SOR_COUNT_B_1 0x43 205#define SOR_COUNT_B1 0x43
159#define SOR_DEBUG_A_0 0x44 206#define SOR_DEBUG_A0 0x44
160#define SOR_DEBUG_A_1 0x45 207#define SOR_DEBUG_A1 0x45
161#define SOR_DEBUG_B_0 0x46 208#define SOR_DEBUG_B0 0x46
162#define SOR_DEBUG_B_1 0x47 209#define SOR_DEBUG_B1 0x47
163#define SOR_TRIG 0x48 210#define SOR_TRIG 0x48
164#define SOR_MSCHECK 0x49 211#define SOR_MSCHECK 0x49
165#define SOR_XBAR_CTRL 0x4a 212#define SOR_XBAR_CTRL 0x4a
213#define SOR_XBAR_CTRL_LINK1_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) << 17)
214#define SOR_XBAR_CTRL_LINK0_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) << 2)
215#define SOR_XBAR_CTRL_LINK_SWAP (1 << 1)
216#define SOR_XBAR_CTRL_BYPASS (1 << 0)
166#define SOR_XBAR_POL 0x4b 217#define SOR_XBAR_POL 0x4b
167 218
168#define SOR_DP_LINKCTL_0 0x4c 219#define SOR_DP_LINKCTL0 0x4c
169#define SOR_DP_LINKCTL_LANE_COUNT_MASK (0x1f << 16) 220#define SOR_DP_LINKCTL_LANE_COUNT_MASK (0x1f << 16)
170#define SOR_DP_LINKCTL_LANE_COUNT(x) (((1 << (x)) - 1) << 16) 221#define SOR_DP_LINKCTL_LANE_COUNT(x) (((1 << (x)) - 1) << 16)
171#define SOR_DP_LINKCTL_ENHANCED_FRAME (1 << 14) 222#define SOR_DP_LINKCTL_ENHANCED_FRAME (1 << 14)
@@ -173,34 +224,34 @@
173#define SOR_DP_LINKCTL_TU_SIZE(x) (((x) & 0x7f) << 2) 224#define SOR_DP_LINKCTL_TU_SIZE(x) (((x) & 0x7f) << 2)
174#define SOR_DP_LINKCTL_ENABLE (1 << 0) 225#define SOR_DP_LINKCTL_ENABLE (1 << 0)
175 226
176#define SOR_DP_LINKCTL_1 0x4d 227#define SOR_DP_LINKCTL1 0x4d
177 228
178#define SOR_LANE_DRIVE_CURRENT_0 0x4e 229#define SOR_LANE_DRIVE_CURRENT0 0x4e
179#define SOR_LANE_DRIVE_CURRENT_1 0x4f 230#define SOR_LANE_DRIVE_CURRENT1 0x4f
180#define SOR_LANE4_DRIVE_CURRENT_0 0x50 231#define SOR_LANE4_DRIVE_CURRENT0 0x50
181#define SOR_LANE4_DRIVE_CURRENT_1 0x51 232#define SOR_LANE4_DRIVE_CURRENT1 0x51
182#define SOR_LANE_DRIVE_CURRENT_LANE3(x) (((x) & 0xff) << 24) 233#define SOR_LANE_DRIVE_CURRENT_LANE3(x) (((x) & 0xff) << 24)
183#define SOR_LANE_DRIVE_CURRENT_LANE2(x) (((x) & 0xff) << 16) 234#define SOR_LANE_DRIVE_CURRENT_LANE2(x) (((x) & 0xff) << 16)
184#define SOR_LANE_DRIVE_CURRENT_LANE1(x) (((x) & 0xff) << 8) 235#define SOR_LANE_DRIVE_CURRENT_LANE1(x) (((x) & 0xff) << 8)
185#define SOR_LANE_DRIVE_CURRENT_LANE0(x) (((x) & 0xff) << 0) 236#define SOR_LANE_DRIVE_CURRENT_LANE0(x) (((x) & 0xff) << 0)
186 237
187#define SOR_LANE_PREEMPHASIS_0 0x52 238#define SOR_LANE_PREEMPHASIS0 0x52
188#define SOR_LANE_PREEMPHASIS_1 0x53 239#define SOR_LANE_PREEMPHASIS1 0x53
189#define SOR_LANE4_PREEMPHASIS_0 0x54 240#define SOR_LANE4_PREEMPHASIS0 0x54
190#define SOR_LANE4_PREEMPHASIS_1 0x55 241#define SOR_LANE4_PREEMPHASIS1 0x55
191#define SOR_LANE_PREEMPHASIS_LANE3(x) (((x) & 0xff) << 24) 242#define SOR_LANE_PREEMPHASIS_LANE3(x) (((x) & 0xff) << 24)
192#define SOR_LANE_PREEMPHASIS_LANE2(x) (((x) & 0xff) << 16) 243#define SOR_LANE_PREEMPHASIS_LANE2(x) (((x) & 0xff) << 16)
193#define SOR_LANE_PREEMPHASIS_LANE1(x) (((x) & 0xff) << 8) 244#define SOR_LANE_PREEMPHASIS_LANE1(x) (((x) & 0xff) << 8)
194#define SOR_LANE_PREEMPHASIS_LANE0(x) (((x) & 0xff) << 0) 245#define SOR_LANE_PREEMPHASIS_LANE0(x) (((x) & 0xff) << 0)
195 246
196#define SOR_LANE_POST_CURSOR_0 0x56 247#define SOR_LANE_POSTCURSOR0 0x56
197#define SOR_LANE_POST_CURSOR_1 0x57 248#define SOR_LANE_POSTCURSOR1 0x57
198#define SOR_LANE_POST_CURSOR_LANE3(x) (((x) & 0xff) << 24) 249#define SOR_LANE_POSTCURSOR_LANE3(x) (((x) & 0xff) << 24)
199#define SOR_LANE_POST_CURSOR_LANE2(x) (((x) & 0xff) << 16) 250#define SOR_LANE_POSTCURSOR_LANE2(x) (((x) & 0xff) << 16)
200#define SOR_LANE_POST_CURSOR_LANE1(x) (((x) & 0xff) << 8) 251#define SOR_LANE_POSTCURSOR_LANE1(x) (((x) & 0xff) << 8)
201#define SOR_LANE_POST_CURSOR_LANE0(x) (((x) & 0xff) << 0) 252#define SOR_LANE_POSTCURSOR_LANE0(x) (((x) & 0xff) << 0)
202 253
203#define SOR_DP_CONFIG_0 0x58 254#define SOR_DP_CONFIG0 0x58
204#define SOR_DP_CONFIG_DISPARITY_NEGATIVE (1 << 31) 255#define SOR_DP_CONFIG_DISPARITY_NEGATIVE (1 << 31)
205#define SOR_DP_CONFIG_ACTIVE_SYM_ENABLE (1 << 26) 256#define SOR_DP_CONFIG_ACTIVE_SYM_ENABLE (1 << 26)
206#define SOR_DP_CONFIG_ACTIVE_SYM_POLARITY (1 << 24) 257#define SOR_DP_CONFIG_ACTIVE_SYM_POLARITY (1 << 24)
@@ -211,11 +262,11 @@
211#define SOR_DP_CONFIG_WATERMARK_MASK (0x3f << 0) 262#define SOR_DP_CONFIG_WATERMARK_MASK (0x3f << 0)
212#define SOR_DP_CONFIG_WATERMARK(x) (((x) & 0x3f) << 0) 263#define SOR_DP_CONFIG_WATERMARK(x) (((x) & 0x3f) << 0)
213 264
214#define SOR_DP_CONFIG_1 0x59 265#define SOR_DP_CONFIG1 0x59
215#define SOR_DP_MN_0 0x5a 266#define SOR_DP_MN0 0x5a
216#define SOR_DP_MN_1 0x5b 267#define SOR_DP_MN1 0x5b
217 268
218#define SOR_DP_PADCTL_0 0x5c 269#define SOR_DP_PADCTL0 0x5c
219#define SOR_DP_PADCTL_PAD_CAL_PD (1 << 23) 270#define SOR_DP_PADCTL_PAD_CAL_PD (1 << 23)
220#define SOR_DP_PADCTL_TX_PU_ENABLE (1 << 22) 271#define SOR_DP_PADCTL_TX_PU_ENABLE (1 << 22)
221#define SOR_DP_PADCTL_TX_PU_MASK (0xff << 8) 272#define SOR_DP_PADCTL_TX_PU_MASK (0xff << 8)
@@ -229,17 +280,18 @@
229#define SOR_DP_PADCTL_PD_TXD_1 (1 << 1) 280#define SOR_DP_PADCTL_PD_TXD_1 (1 << 1)
230#define SOR_DP_PADCTL_PD_TXD_2 (1 << 0) 281#define SOR_DP_PADCTL_PD_TXD_2 (1 << 0)
231 282
232#define SOR_DP_PADCTL_1 0x5d 283#define SOR_DP_PADCTL1 0x5d
233 284
234#define SOR_DP_DEBUG_0 0x5e 285#define SOR_DP_DEBUG0 0x5e
235#define SOR_DP_DEBUG_1 0x5f 286#define SOR_DP_DEBUG1 0x5f
236 287
237#define SOR_DP_SPARE_0 0x60 288#define SOR_DP_SPARE0 0x60
238#define SOR_DP_SPARE_MACRO_SOR_CLK (1 << 2) 289#define SOR_DP_SPARE_DISP_VIDEO_PREAMBLE (1 << 3)
239#define SOR_DP_SPARE_PANEL_INTERNAL (1 << 1) 290#define SOR_DP_SPARE_MACRO_SOR_CLK (1 << 2)
240#define SOR_DP_SPARE_SEQ_ENABLE (1 << 0) 291#define SOR_DP_SPARE_PANEL_INTERNAL (1 << 1)
292#define SOR_DP_SPARE_SEQ_ENABLE (1 << 0)
241 293
242#define SOR_DP_SPARE_1 0x61 294#define SOR_DP_SPARE1 0x61
243#define SOR_DP_AUDIO_CTRL 0x62 295#define SOR_DP_AUDIO_CTRL 0x62
244 296
245#define SOR_DP_AUDIO_HBLANK_SYMBOLS 0x63 297#define SOR_DP_AUDIO_HBLANK_SYMBOLS 0x63
@@ -249,13 +301,13 @@
249#define SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK (0x1fffff << 0) 301#define SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK (0x1fffff << 0)
250 302
251#define SOR_DP_GENERIC_INFOFRAME_HEADER 0x65 303#define SOR_DP_GENERIC_INFOFRAME_HEADER 0x65
252#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_0 0x66 304#define SOR_DP_GENERIC_INFOFRAME_SUBPACK0 0x66
253#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_1 0x67 305#define SOR_DP_GENERIC_INFOFRAME_SUBPACK1 0x67
254#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_2 0x68 306#define SOR_DP_GENERIC_INFOFRAME_SUBPACK2 0x68
255#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_3 0x69 307#define SOR_DP_GENERIC_INFOFRAME_SUBPACK3 0x69
256#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_4 0x6a 308#define SOR_DP_GENERIC_INFOFRAME_SUBPACK4 0x6a
257#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_5 0x6b 309#define SOR_DP_GENERIC_INFOFRAME_SUBPACK5 0x6b
258#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_6 0x6c 310#define SOR_DP_GENERIC_INFOFRAME_SUBPACK6 0x6c
259 311
260#define SOR_DP_TPG 0x6d 312#define SOR_DP_TPG 0x6d
261#define SOR_DP_TPG_CHANNEL_CODING (1 << 6) 313#define SOR_DP_TPG_CHANNEL_CODING (1 << 6)
@@ -275,8 +327,44 @@
275#define SOR_DP_TPG_PATTERN_NONE (0x0 << 0) 327#define SOR_DP_TPG_PATTERN_NONE (0x0 << 0)
276 328
277#define SOR_DP_TPG_CONFIG 0x6e 329#define SOR_DP_TPG_CONFIG 0x6e
278#define SOR_DP_LQ_CSTM_0 0x6f 330#define SOR_DP_LQ_CSTM0 0x6f
279#define SOR_DP_LQ_CSTM_1 0x70 331#define SOR_DP_LQ_CSTM1 0x70
280#define SOR_DP_LQ_CSTM_2 0x71 332#define SOR_DP_LQ_CSTM2 0x71
333
334#define SOR_HDMI_AUDIO_INFOFRAME_CTRL 0x9a
335#define SOR_HDMI_AUDIO_INFOFRAME_STATUS 0x9b
336#define SOR_HDMI_AUDIO_INFOFRAME_HEADER 0x9c
337
338#define SOR_HDMI_AVI_INFOFRAME_CTRL 0x9f
339#define INFOFRAME_CTRL_CHECKSUM_ENABLE (1 << 9)
340#define INFOFRAME_CTRL_SINGLE (1 << 8)
341#define INFOFRAME_CTRL_OTHER (1 << 4)
342#define INFOFRAME_CTRL_ENABLE (1 << 0)
343
344#define SOR_HDMI_AVI_INFOFRAME_STATUS 0xa0
345#define INFOFRAME_STATUS_DONE (1 << 0)
346
347#define SOR_HDMI_AVI_INFOFRAME_HEADER 0xa1
348#define INFOFRAME_HEADER_LEN(x) (((x) & 0xff) << 16)
349#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
350#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
351
352#define SOR_HDMI_CTRL 0xc0
353#define SOR_HDMI_CTRL_ENABLE (1 << 30)
354#define SOR_HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
355#define SOR_HDMI_CTRL_AUDIO_LAYOUT (1 << 10)
356#define SOR_HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
357
358#define SOR_REFCLK 0xe6
359#define SOR_REFCLK_DIV_INT(x) ((((x) >> 2) & 0xff) << 8)
360#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6)
361
362#define SOR_INPUT_CONTROL 0xe8
363#define SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED (1 << 1)
364#define SOR_INPUT_CONTROL_HDMI_SRC_SELECT(x) (((x) & 0x1) << 0)
365
366#define SOR_HDMI_VSI_INFOFRAME_CTRL 0x123
367#define SOR_HDMI_VSI_INFOFRAME_STATUS 0x124
368#define SOR_HDMI_VSI_INFOFRAME_HEADER 0x125
281 369
282#endif 370#endif
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index bf080abc86d1..4e19d0f9cc30 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -340,7 +340,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
340 swap_storage = shmem_file_setup("ttm swap", 340 swap_storage = shmem_file_setup("ttm swap",
341 ttm->num_pages << PAGE_SHIFT, 341 ttm->num_pages << PAGE_SHIFT,
342 0); 342 0);
343 if (unlikely(IS_ERR(swap_storage))) { 343 if (IS_ERR(swap_storage)) {
344 pr_err("Failed allocating swap storage\n"); 344 pr_err("Failed allocating swap storage\n");
345 return PTR_ERR(swap_storage); 345 return PTR_ERR(swap_storage);
346 } 346 }
@@ -354,7 +354,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
354 if (unlikely(from_page == NULL)) 354 if (unlikely(from_page == NULL))
355 continue; 355 continue;
356 to_page = shmem_read_mapping_page(swap_space, i); 356 to_page = shmem_read_mapping_page(swap_space, i);
357 if (unlikely(IS_ERR(to_page))) { 357 if (IS_ERR(to_page)) {
358 ret = PTR_ERR(to_page); 358 ret = PTR_ERR(to_page);
359 goto out_err; 359 goto out_err;
360 } 360 }
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 5fc16cecd3ba..62c7b1dafaa4 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -288,7 +288,7 @@ static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect
288{ 288{
289 struct udl_fbdev *ufbdev = info->par; 289 struct udl_fbdev *ufbdev = info->par;
290 290
291 sys_fillrect(info, rect); 291 drm_fb_helper_sys_fillrect(info, rect);
292 292
293 udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width, 293 udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
294 rect->height); 294 rect->height);
@@ -298,7 +298,7 @@ static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *regi
298{ 298{
299 struct udl_fbdev *ufbdev = info->par; 299 struct udl_fbdev *ufbdev = info->par;
300 300
301 sys_copyarea(info, region); 301 drm_fb_helper_sys_copyarea(info, region);
302 302
303 udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width, 303 udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
304 region->height); 304 region->height);
@@ -308,7 +308,7 @@ static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
308{ 308{
309 struct udl_fbdev *ufbdev = info->par; 309 struct udl_fbdev *ufbdev = info->par;
310 310
311 sys_imageblit(info, image); 311 drm_fb_helper_sys_imageblit(info, image);
312 312
313 udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width, 313 udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
314 image->height); 314 image->height);
@@ -476,7 +476,6 @@ static int udlfb_create(struct drm_fb_helper *helper,
476 container_of(helper, struct udl_fbdev, helper); 476 container_of(helper, struct udl_fbdev, helper);
477 struct drm_device *dev = ufbdev->helper.dev; 477 struct drm_device *dev = ufbdev->helper.dev;
478 struct fb_info *info; 478 struct fb_info *info;
479 struct device *device = dev->dev;
480 struct drm_framebuffer *fb; 479 struct drm_framebuffer *fb;
481 struct drm_mode_fb_cmd2 mode_cmd; 480 struct drm_mode_fb_cmd2 mode_cmd;
482 struct udl_gem_object *obj; 481 struct udl_gem_object *obj;
@@ -506,21 +505,20 @@ static int udlfb_create(struct drm_fb_helper *helper,
506 goto out_gfree; 505 goto out_gfree;
507 } 506 }
508 507
509 info = framebuffer_alloc(0, device); 508 info = drm_fb_helper_alloc_fbi(helper);
510 if (!info) { 509 if (IS_ERR(info)) {
511 ret = -ENOMEM; 510 ret = PTR_ERR(info);
512 goto out_gfree; 511 goto out_gfree;
513 } 512 }
514 info->par = ufbdev; 513 info->par = ufbdev;
515 514
516 ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj); 515 ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
517 if (ret) 516 if (ret)
518 goto out_gfree; 517 goto out_destroy_fbi;
519 518
520 fb = &ufbdev->ufb.base; 519 fb = &ufbdev->ufb.base;
521 520
522 ufbdev->helper.fb = fb; 521 ufbdev->helper.fb = fb;
523 ufbdev->helper.fbdev = info;
524 522
525 strcpy(info->fix.id, "udldrmfb"); 523 strcpy(info->fix.id, "udldrmfb");
526 524
@@ -533,18 +531,13 @@ static int udlfb_create(struct drm_fb_helper *helper,
533 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 531 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
534 drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height); 532 drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
535 533
536 ret = fb_alloc_cmap(&info->cmap, 256, 0);
537 if (ret) {
538 ret = -ENOMEM;
539 goto out_gfree;
540 }
541
542
543 DRM_DEBUG_KMS("allocated %dx%d vmal %p\n", 534 DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
544 fb->width, fb->height, 535 fb->width, fb->height,
545 ufbdev->ufb.obj->vmapping); 536 ufbdev->ufb.obj->vmapping);
546 537
547 return ret; 538 return ret;
539out_destroy_fbi:
540 drm_fb_helper_release_fbi(helper);
548out_gfree: 541out_gfree:
549 drm_gem_object_unreference(&ufbdev->ufb.obj->base); 542 drm_gem_object_unreference(&ufbdev->ufb.obj->base);
550out: 543out:
@@ -558,14 +551,8 @@ static const struct drm_fb_helper_funcs udl_fb_helper_funcs = {
558static void udl_fbdev_destroy(struct drm_device *dev, 551static void udl_fbdev_destroy(struct drm_device *dev,
559 struct udl_fbdev *ufbdev) 552 struct udl_fbdev *ufbdev)
560{ 553{
561 struct fb_info *info; 554 drm_fb_helper_unregister_fbi(&ufbdev->helper);
562 if (ufbdev->helper.fbdev) { 555 drm_fb_helper_release_fbi(&ufbdev->helper);
563 info = ufbdev->helper.fbdev;
564 unregister_framebuffer(info);
565 if (info->cmap.len)
566 fb_dealloc_cmap(&info->cmap);
567 framebuffer_release(info);
568 }
569 drm_fb_helper_fini(&ufbdev->helper); 556 drm_fb_helper_fini(&ufbdev->helper);
570 drm_framebuffer_unregister_private(&ufbdev->ufb.base); 557 drm_framebuffer_unregister_private(&ufbdev->ufb.base);
571 drm_framebuffer_cleanup(&ufbdev->ufb.base); 558 drm_framebuffer_cleanup(&ufbdev->ufb.base);
@@ -631,11 +618,7 @@ void udl_fbdev_unplug(struct drm_device *dev)
631 return; 618 return;
632 619
633 ufbdev = udl->fbdev; 620 ufbdev = udl->fbdev;
634 if (ufbdev->helper.fbdev) { 621 drm_fb_helper_unlink_fbi(&ufbdev->helper);
635 struct fb_info *info;
636 info = ufbdev->helper.fbdev;
637 unlink_framebuffer(info);
638 }
639} 622}
640 623
641struct drm_framebuffer * 624struct drm_framebuffer *
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index df198d9e770c..6a81e084593b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -173,7 +173,7 @@ static void virtio_gpu_3d_fillrect(struct fb_info *info,
173 const struct fb_fillrect *rect) 173 const struct fb_fillrect *rect)
174{ 174{
175 struct virtio_gpu_fbdev *vfbdev = info->par; 175 struct virtio_gpu_fbdev *vfbdev = info->par;
176 sys_fillrect(info, rect); 176 drm_fb_helper_sys_fillrect(info, rect);
177 virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy, 177 virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
178 rect->width, rect->height); 178 rect->width, rect->height);
179 schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD); 179 schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -183,7 +183,7 @@ static void virtio_gpu_3d_copyarea(struct fb_info *info,
183 const struct fb_copyarea *area) 183 const struct fb_copyarea *area)
184{ 184{
185 struct virtio_gpu_fbdev *vfbdev = info->par; 185 struct virtio_gpu_fbdev *vfbdev = info->par;
186 sys_copyarea(info, area); 186 drm_fb_helper_sys_copyarea(info, area);
187 virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy, 187 virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
188 area->width, area->height); 188 area->width, area->height);
189 schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD); 189 schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -193,7 +193,7 @@ static void virtio_gpu_3d_imageblit(struct fb_info *info,
193 const struct fb_image *image) 193 const struct fb_image *image)
194{ 194{
195 struct virtio_gpu_fbdev *vfbdev = info->par; 195 struct virtio_gpu_fbdev *vfbdev = info->par;
196 sys_imageblit(info, image); 196 drm_fb_helper_sys_imageblit(info, image);
197 virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy, 197 virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
198 image->width, image->height); 198 image->width, image->height);
199 schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD); 199 schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -230,7 +230,6 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
230 struct drm_framebuffer *fb; 230 struct drm_framebuffer *fb;
231 struct drm_mode_fb_cmd2 mode_cmd = {}; 231 struct drm_mode_fb_cmd2 mode_cmd = {};
232 struct virtio_gpu_object *obj; 232 struct virtio_gpu_object *obj;
233 struct device *device = vgdev->dev;
234 uint32_t resid, format, size; 233 uint32_t resid, format, size;
235 int ret; 234 int ret;
236 235
@@ -317,18 +316,12 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
317 if (ret) 316 if (ret)
318 goto err_obj_attach; 317 goto err_obj_attach;
319 318
320 info = framebuffer_alloc(0, device); 319 info = drm_fb_helper_alloc_fbi(helper);
321 if (!info) { 320 if (IS_ERR(info)) {
322 ret = -ENOMEM; 321 ret = PTR_ERR(info);
323 goto err_fb_alloc; 322 goto err_fb_alloc;
324 } 323 }
325 324
326 ret = fb_alloc_cmap(&info->cmap, 256, 0);
327 if (ret) {
328 ret = -ENOMEM;
329 goto err_fb_alloc_cmap;
330 }
331
332 info->par = helper; 325 info->par = helper;
333 326
334 ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb, 327 ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
@@ -339,7 +332,6 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
339 fb = &vfbdev->vgfb.base; 332 fb = &vfbdev->vgfb.base;
340 333
341 vfbdev->helper.fb = fb; 334 vfbdev->helper.fb = fb;
342 vfbdev->helper.fbdev = info;
343 335
344 strcpy(info->fix.id, "virtiodrmfb"); 336 strcpy(info->fix.id, "virtiodrmfb");
345 info->flags = FBINFO_DEFAULT; 337 info->flags = FBINFO_DEFAULT;
@@ -357,9 +349,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
357 return 0; 349 return 0;
358 350
359err_fb_init: 351err_fb_init:
360 fb_dealloc_cmap(&info->cmap); 352 drm_fb_helper_release_fbi(helper);
361err_fb_alloc_cmap:
362 framebuffer_release(info);
363err_fb_alloc: 353err_fb_alloc:
364 virtio_gpu_cmd_resource_inval_backing(vgdev, resid); 354 virtio_gpu_cmd_resource_inval_backing(vgdev, resid);
365err_obj_attach: 355err_obj_attach:
@@ -371,15 +361,11 @@ err_obj_vmap:
371static int virtio_gpu_fbdev_destroy(struct drm_device *dev, 361static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
372 struct virtio_gpu_fbdev *vgfbdev) 362 struct virtio_gpu_fbdev *vgfbdev)
373{ 363{
374 struct fb_info *info;
375 struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb; 364 struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;
376 365
377 if (vgfbdev->helper.fbdev) { 366 drm_fb_helper_unregister_fbi(&vgfbdev->helper);
378 info = vgfbdev->helper.fbdev; 367 drm_fb_helper_release_fbi(&vgfbdev->helper);
379 368
380 unregister_framebuffer(info);
381 framebuffer_release(info);
382 }
383 if (vgfb->obj) 369 if (vgfb->obj)
384 vgfb->obj = NULL; 370 vgfb->obj = NULL;
385 drm_fb_helper_fini(&vgfbdev->helper); 371 drm_fb_helper_fini(&vgfbdev->helper);
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index ce0ab951f507..d281575bbe11 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -7,6 +7,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
7 vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ 7 vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
8 vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ 8 vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
9 vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \ 9 vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
10 vmwgfx_cmdbuf_res.o \ 10 vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
11 vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o
11 12
12obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o 13obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
new file mode 100644
index 000000000000..8cce7f15b6eb
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
@@ -0,0 +1,3 @@
1/*
2 * Intentionally empty file.
3 */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
new file mode 100644
index 000000000000..9ce2466a5d00
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
@@ -0,0 +1,110 @@
1/**********************************************************
2 * Copyright 2007-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_caps.h --
28 *
29 * Definitions for SVGA3D hardware capabilities. Capabilities
30 * are used to query for optional rendering features during
 31 * driver initialization. The capability data is stored as a very
32 * basic key/value dictionary within the "FIFO register" memory
33 * area at the beginning of BAR2.
34 *
35 * Note that these definitions are only for 3D capabilities.
36 * The SVGA device also has "device capabilities" and "FIFO
37 * capabilities", which are non-3D-specific and are stored as
38 * bitfields rather than key/value pairs.
39 */
40
41#ifndef _SVGA3D_CAPS_H_
42#define _SVGA3D_CAPS_H_
43
44#define INCLUDE_ALLOW_MODULE
45#define INCLUDE_ALLOW_USERLEVEL
46
47#include "includeCheck.h"
48
49#include "svga_reg.h"
50
51#define SVGA_FIFO_3D_CAPS_SIZE (SVGA_FIFO_3D_CAPS_LAST - \
52 SVGA_FIFO_3D_CAPS + 1)
53
54
55/*
56 * SVGA3dCapsRecordType
57 *
58 * Record types that can be found in the caps block.
59 * Related record types are grouped together numerically so that
60 * SVGA3dCaps_FindRecord() can be applied on a range of record
61 * types.
62 */
63
64typedef enum {
65 SVGA3DCAPS_RECORD_UNKNOWN = 0,
66 SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
67 SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
68 SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
69} SVGA3dCapsRecordType;
70
71
72/*
73 * SVGA3dCapsRecordHeader
74 *
75 * Header field leading each caps block record. Contains the offset (in
 76 * register words, NOT bytes) to the next caps block record (or to the
 77 * end of the caps block records, which is marked by a zero word) and
 78 * the record type as defined above.
79 */
80
81typedef
82#include "vmware_pack_begin.h"
83struct SVGA3dCapsRecordHeader {
84 uint32 length;
85 SVGA3dCapsRecordType type;
86}
87#include "vmware_pack_end.h"
88SVGA3dCapsRecordHeader;
89
90
91/*
92 * SVGA3dCapsRecord
93 *
94 * Caps block record; "data" is a placeholder for the actual data structure
 95 * contained within the record.
96 */
97
98typedef
99#include "vmware_pack_begin.h"
100struct SVGA3dCapsRecord {
101 SVGA3dCapsRecordHeader header;
102 uint32 data[1];
103}
104#include "vmware_pack_end.h"
105SVGA3dCapsRecord;
106
107
108typedef uint32 SVGA3dCapPair[2];
109
110#endif
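
The record layout above implies a simple traversal scheme: each header's length field gives the distance, in 32-bit register words, to the next record, and a zero word terminates the list; because related record types are grouped numerically, a lookup can match a whole range at once. As a rough, self-contained sketch of that idea (hypothetical helper, using plain stdint types rather than the packed originals, and not part of this patch):

#include <stdint.h>
#include <stddef.h>

struct caps_record_header {		/* mirrors SVGA3dCapsRecordHeader */
	uint32_t length;		/* words to the next record */
	uint32_t type;
};

/*
 * Walk a caps block (an array of 32-bit words) until a zero length
 * word terminates it; return the first record whose type falls in
 * [type_min, type_max], or NULL if none matches.
 */
static const struct caps_record_header *
caps_find_record(const uint32_t *caps, size_t nwords,
		 uint32_t type_min, uint32_t type_max)
{
	size_t pos = 0;

	while (pos < nwords && caps[pos] != 0) {
		const struct caps_record_header *hdr =
			(const struct caps_record_header *)&caps[pos];

		if (hdr->type >= type_min && hdr->type <= type_max)
			return hdr;
		pos += hdr->length;	/* length is in words, not bytes */
	}
	return NULL;
}

Calling this with the SVGA3DCAPS_RECORD_DEVCAPS_MIN/MAX bounds defined above would locate the device-capabilities record, which is how the range-grouped record types are meant to be consumed.
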
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
new file mode 100644
index 000000000000..2dfd57c5f463
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -0,0 +1,2071 @@
1/**********************************************************
2 * Copyright 1998-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_cmd.h --
28 *
29 * SVGA 3d hardware cmd definitions
30 */
31
32#ifndef _SVGA3D_CMD_H_
33#define _SVGA3D_CMD_H_
34
35#define INCLUDE_ALLOW_MODULE
36#define INCLUDE_ALLOW_USERLEVEL
37#define INCLUDE_ALLOW_VMCORE
38
39#include "includeCheck.h"
40#include "svga3d_types.h"
41
42/*
43 * Identifiers for commands in the command FIFO.
44 *
45 * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
46 * the SVGA3D protocol and remain reserved; they should not be used in the
47 * future.
48 *
49 * IDs between 1040 and 1999 (inclusive) are available for use by the
50 * current SVGA3D protocol.
51 *
52 * FIFO clients other than SVGA3D should stay below 1000, or at 2000
53 * and up.
54 */
55
56typedef enum {
57 SVGA_3D_CMD_LEGACY_BASE = 1000,
58 SVGA_3D_CMD_BASE = 1040,
59
60 SVGA_3D_CMD_SURFACE_DEFINE = 1040,
61 SVGA_3D_CMD_SURFACE_DESTROY = 1041,
62 SVGA_3D_CMD_SURFACE_COPY = 1042,
63 SVGA_3D_CMD_SURFACE_STRETCHBLT = 1043,
64 SVGA_3D_CMD_SURFACE_DMA = 1044,
65 SVGA_3D_CMD_CONTEXT_DEFINE = 1045,
66 SVGA_3D_CMD_CONTEXT_DESTROY = 1046,
67 SVGA_3D_CMD_SETTRANSFORM = 1047,
68 SVGA_3D_CMD_SETZRANGE = 1048,
69 SVGA_3D_CMD_SETRENDERSTATE = 1049,
70 SVGA_3D_CMD_SETRENDERTARGET = 1050,
71 SVGA_3D_CMD_SETTEXTURESTATE = 1051,
72 SVGA_3D_CMD_SETMATERIAL = 1052,
73 SVGA_3D_CMD_SETLIGHTDATA = 1053,
74 SVGA_3D_CMD_SETLIGHTENABLED = 1054,
75 SVGA_3D_CMD_SETVIEWPORT = 1055,
76 SVGA_3D_CMD_SETCLIPPLANE = 1056,
77 SVGA_3D_CMD_CLEAR = 1057,
78 SVGA_3D_CMD_PRESENT = 1058,
79 SVGA_3D_CMD_SHADER_DEFINE = 1059,
80 SVGA_3D_CMD_SHADER_DESTROY = 1060,
81 SVGA_3D_CMD_SET_SHADER = 1061,
82 SVGA_3D_CMD_SET_SHADER_CONST = 1062,
83 SVGA_3D_CMD_DRAW_PRIMITIVES = 1063,
84 SVGA_3D_CMD_SETSCISSORRECT = 1064,
85 SVGA_3D_CMD_BEGIN_QUERY = 1065,
86 SVGA_3D_CMD_END_QUERY = 1066,
87 SVGA_3D_CMD_WAIT_FOR_QUERY = 1067,
88 SVGA_3D_CMD_PRESENT_READBACK = 1068,
89 SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN = 1069,
90 SVGA_3D_CMD_SURFACE_DEFINE_V2 = 1070,
91 SVGA_3D_CMD_GENERATE_MIPMAPS = 1071,
92 SVGA_3D_CMD_VIDEO_CREATE_DECODER = 1072,
93 SVGA_3D_CMD_VIDEO_DESTROY_DECODER = 1073,
94 SVGA_3D_CMD_VIDEO_CREATE_PROCESSOR = 1074,
95 SVGA_3D_CMD_VIDEO_DESTROY_PROCESSOR = 1075,
96 SVGA_3D_CMD_VIDEO_DECODE_START_FRAME = 1076,
97 SVGA_3D_CMD_VIDEO_DECODE_RENDER = 1077,
98 SVGA_3D_CMD_VIDEO_DECODE_END_FRAME = 1078,
99 SVGA_3D_CMD_VIDEO_PROCESS_FRAME = 1079,
100 SVGA_3D_CMD_ACTIVATE_SURFACE = 1080,
101 SVGA_3D_CMD_DEACTIVATE_SURFACE = 1081,
102 SVGA_3D_CMD_SCREEN_DMA = 1082,
103 SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE = 1083,
104 SVGA_3D_CMD_OPEN_CONTEXT_SURFACE = 1084,
105
106 SVGA_3D_CMD_LOGICOPS_BITBLT = 1085,
107 SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1086,
108 SVGA_3D_CMD_LOGICOPS_STRETCHBLT = 1087,
109 SVGA_3D_CMD_LOGICOPS_COLORFILL = 1088,
110 SVGA_3D_CMD_LOGICOPS_ALPHABLEND = 1089,
111 SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND = 1090,
112
113 SVGA_3D_CMD_SET_OTABLE_BASE = 1091,
114 SVGA_3D_CMD_READBACK_OTABLE = 1092,
115
116 SVGA_3D_CMD_DEFINE_GB_MOB = 1093,
117 SVGA_3D_CMD_DESTROY_GB_MOB = 1094,
118 SVGA_3D_CMD_DEAD3 = 1095,
119 SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING = 1096,
120
121 SVGA_3D_CMD_DEFINE_GB_SURFACE = 1097,
122 SVGA_3D_CMD_DESTROY_GB_SURFACE = 1098,
123 SVGA_3D_CMD_BIND_GB_SURFACE = 1099,
124 SVGA_3D_CMD_COND_BIND_GB_SURFACE = 1100,
125 SVGA_3D_CMD_UPDATE_GB_IMAGE = 1101,
126 SVGA_3D_CMD_UPDATE_GB_SURFACE = 1102,
127 SVGA_3D_CMD_READBACK_GB_IMAGE = 1103,
128 SVGA_3D_CMD_READBACK_GB_SURFACE = 1104,
129 SVGA_3D_CMD_INVALIDATE_GB_IMAGE = 1105,
130 SVGA_3D_CMD_INVALIDATE_GB_SURFACE = 1106,
131
132 SVGA_3D_CMD_DEFINE_GB_CONTEXT = 1107,
133 SVGA_3D_CMD_DESTROY_GB_CONTEXT = 1108,
134 SVGA_3D_CMD_BIND_GB_CONTEXT = 1109,
135 SVGA_3D_CMD_READBACK_GB_CONTEXT = 1110,
136 SVGA_3D_CMD_INVALIDATE_GB_CONTEXT = 1111,
137
138 SVGA_3D_CMD_DEFINE_GB_SHADER = 1112,
139 SVGA_3D_CMD_DESTROY_GB_SHADER = 1113,
140 SVGA_3D_CMD_BIND_GB_SHADER = 1114,
141
142 SVGA_3D_CMD_SET_OTABLE_BASE64 = 1115,
143
144 SVGA_3D_CMD_BEGIN_GB_QUERY = 1116,
145 SVGA_3D_CMD_END_GB_QUERY = 1117,
146 SVGA_3D_CMD_WAIT_FOR_GB_QUERY = 1118,
147
148 SVGA_3D_CMD_NOP = 1119,
149
150 SVGA_3D_CMD_ENABLE_GART = 1120,
151 SVGA_3D_CMD_DISABLE_GART = 1121,
152 SVGA_3D_CMD_MAP_MOB_INTO_GART = 1122,
153 SVGA_3D_CMD_UNMAP_GART_RANGE = 1123,
154
155 SVGA_3D_CMD_DEFINE_GB_SCREENTARGET = 1124,
156 SVGA_3D_CMD_DESTROY_GB_SCREENTARGET = 1125,
157 SVGA_3D_CMD_BIND_GB_SCREENTARGET = 1126,
158 SVGA_3D_CMD_UPDATE_GB_SCREENTARGET = 1127,
159
160 SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL = 1128,
161 SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL = 1129,
162
163 SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE = 1130,
164
165 SVGA_3D_CMD_GB_SCREEN_DMA = 1131,
166 SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH = 1132,
167 SVGA_3D_CMD_GB_MOB_FENCE = 1133,
168 SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 = 1134,
169 SVGA_3D_CMD_DEFINE_GB_MOB64 = 1135,
170 SVGA_3D_CMD_REDEFINE_GB_MOB64 = 1136,
171 SVGA_3D_CMD_NOP_ERROR = 1137,
172
173 SVGA_3D_CMD_SET_VERTEX_STREAMS = 1138,
174 SVGA_3D_CMD_SET_VERTEX_DECLS = 1139,
175 SVGA_3D_CMD_SET_VERTEX_DIVISORS = 1140,
176 SVGA_3D_CMD_DRAW = 1141,
177 SVGA_3D_CMD_DRAW_INDEXED = 1142,
178
179 /*
180 * DX10 Commands
181 */
182 SVGA_3D_CMD_DX_MIN = 1143,
183 SVGA_3D_CMD_DX_DEFINE_CONTEXT = 1143,
184 SVGA_3D_CMD_DX_DESTROY_CONTEXT = 1144,
185 SVGA_3D_CMD_DX_BIND_CONTEXT = 1145,
186 SVGA_3D_CMD_DX_READBACK_CONTEXT = 1146,
187 SVGA_3D_CMD_DX_INVALIDATE_CONTEXT = 1147,
188 SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER = 1148,
189 SVGA_3D_CMD_DX_SET_SHADER_RESOURCES = 1149,
190 SVGA_3D_CMD_DX_SET_SHADER = 1150,
191 SVGA_3D_CMD_DX_SET_SAMPLERS = 1151,
192 SVGA_3D_CMD_DX_DRAW = 1152,
193 SVGA_3D_CMD_DX_DRAW_INDEXED = 1153,
194 SVGA_3D_CMD_DX_DRAW_INSTANCED = 1154,
195 SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED = 1155,
196 SVGA_3D_CMD_DX_DRAW_AUTO = 1156,
197 SVGA_3D_CMD_DX_SET_INPUT_LAYOUT = 1157,
198 SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS = 1158,
199 SVGA_3D_CMD_DX_SET_INDEX_BUFFER = 1159,
200 SVGA_3D_CMD_DX_SET_TOPOLOGY = 1160,
201 SVGA_3D_CMD_DX_SET_RENDERTARGETS = 1161,
202 SVGA_3D_CMD_DX_SET_BLEND_STATE = 1162,
203 SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE = 1163,
204 SVGA_3D_CMD_DX_SET_RASTERIZER_STATE = 1164,
205 SVGA_3D_CMD_DX_DEFINE_QUERY = 1165,
206 SVGA_3D_CMD_DX_DESTROY_QUERY = 1166,
207 SVGA_3D_CMD_DX_BIND_QUERY = 1167,
208 SVGA_3D_CMD_DX_SET_QUERY_OFFSET = 1168,
209 SVGA_3D_CMD_DX_BEGIN_QUERY = 1169,
210 SVGA_3D_CMD_DX_END_QUERY = 1170,
211 SVGA_3D_CMD_DX_READBACK_QUERY = 1171,
212 SVGA_3D_CMD_DX_SET_PREDICATION = 1172,
213 SVGA_3D_CMD_DX_SET_SOTARGETS = 1173,
214 SVGA_3D_CMD_DX_SET_VIEWPORTS = 1174,
215 SVGA_3D_CMD_DX_SET_SCISSORRECTS = 1175,
216 SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW = 1176,
217 SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW = 1177,
218 SVGA_3D_CMD_DX_PRED_COPY_REGION = 1178,
219 SVGA_3D_CMD_DX_PRED_COPY = 1179,
220 SVGA_3D_CMD_DX_STRETCHBLT = 1180,
221 SVGA_3D_CMD_DX_GENMIPS = 1181,
222 SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE = 1182,
223 SVGA_3D_CMD_DX_READBACK_SUBRESOURCE = 1183,
224 SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE = 1184,
225 SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW = 1185,
226 SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW = 1186,
227 SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW = 1187,
228 SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW = 1188,
229 SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW = 1189,
230 SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW = 1190,
231 SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT = 1191,
232 SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT = 1192,
233 SVGA_3D_CMD_DX_DEFINE_BLEND_STATE = 1193,
234 SVGA_3D_CMD_DX_DESTROY_BLEND_STATE = 1194,
235 SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE = 1195,
236 SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE = 1196,
237 SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE = 1197,
238 SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE = 1198,
239 SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE = 1199,
240 SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE = 1200,
241 SVGA_3D_CMD_DX_DEFINE_SHADER = 1201,
242 SVGA_3D_CMD_DX_DESTROY_SHADER = 1202,
243 SVGA_3D_CMD_DX_BIND_SHADER = 1203,
244 SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT = 1204,
245 SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT = 1205,
246 SVGA_3D_CMD_DX_SET_STREAMOUTPUT = 1206,
247 SVGA_3D_CMD_DX_SET_COTABLE = 1207,
248 SVGA_3D_CMD_DX_READBACK_COTABLE = 1208,
249 SVGA_3D_CMD_DX_BUFFER_COPY = 1209,
250 SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER = 1210,
251 SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK = 1211,
252 SVGA_3D_CMD_DX_MOVE_QUERY = 1212,
253 SVGA_3D_CMD_DX_BIND_ALL_QUERY = 1213,
254 SVGA_3D_CMD_DX_READBACK_ALL_QUERY = 1214,
255 SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER = 1215,
256 SVGA_3D_CMD_DX_MOB_FENCE_64 = 1216,
257 SVGA_3D_CMD_DX_BIND_SHADER_ON_CONTEXT = 1217,
258 SVGA_3D_CMD_DX_HINT = 1218,
259 SVGA_3D_CMD_DX_BUFFER_UPDATE = 1219,
260 SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET = 1220,
261 SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET = 1221,
262 SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET = 1222,
263
264 /*
265 * Reserve some IDs to be used for the DX11 shader types.
266 */
267 SVGA_3D_CMD_DX_RESERVED1 = 1223,
268 SVGA_3D_CMD_DX_RESERVED2 = 1224,
269 SVGA_3D_CMD_DX_RESERVED3 = 1225,
270
271 SVGA_3D_CMD_DX_MAX = 1226,
272 SVGA_3D_CMD_MAX = 1226,
273 SVGA_3D_CMD_FUTURE_MAX = 3000
274} SVGAFifo3dCmdId;
275
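The ID ranges called out in the comment above translate directly into a range check: legacy IDs 1000-1039 are reserved, and current SVGA3D commands occupy [SVGA_3D_CMD_BASE, SVGA_3D_CMD_MAX). A minimal sketch (hypothetical helper name, constants taken from the enum above):

#include <stdbool.h>
#include <stdint.h>

/* Accept only IDs in the current SVGA3D command range; legacy IDs
 * (1000-1039) and non-SVGA3D FIFO client IDs fall outside it. */
static bool svga3d_cmd_id_is_valid(uint32_t id)
{
	return id >= 1040 /* SVGA_3D_CMD_BASE */ &&
	       id <  1226 /* SVGA_3D_CMD_MAX */;
}
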
276/*
277 * FIFO command format definitions:
278 */
279
280/*
281 * The data size header following cmdNum for every 3d command
282 */
283typedef
284#include "vmware_pack_begin.h"
285struct {
286 uint32 id;
287 uint32 size;
288}
289#include "vmware_pack_end.h"
290SVGA3dCmdHeader;
291
292typedef
293#include "vmware_pack_begin.h"
294struct {
295 uint32 numMipLevels;
296}
297#include "vmware_pack_end.h"
298SVGA3dSurfaceFace;
299
300typedef
301#include "vmware_pack_begin.h"
302struct {
303 uint32 sid;
304 SVGA3dSurfaceFlags surfaceFlags;
305 SVGA3dSurfaceFormat format;
306 /*
307 * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
308 * structures must have the same value of numMipLevels field.
309 * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
310 * numMipLevels set to 0.
311 */
312 SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
313 /*
314 * Followed by an SVGA3dSize structure for each mip level in each face.
315 *
316 * A note on surface sizes: Sizes are always specified in pixels,
317 * even if the true surface size is not a multiple of the minimum
318 * block size of the surface's format. For example, a 3x3x1 DXT1
319 * compressed texture would actually be stored as a 4x4x1 image in
320 * memory.
321 */
322}
323#include "vmware_pack_end.h"
324SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
325
326typedef
327#include "vmware_pack_begin.h"
328struct {
329 uint32 sid;
330 SVGA3dSurfaceFlags surfaceFlags;
331 SVGA3dSurfaceFormat format;
332 /*
 333 * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
 334 * structures must have the same value in their numMipLevels field.
 335 * Otherwise, all but the first SVGA3dSurfaceFace structure must have
 336 * numMipLevels set to 0.
337 */
338 SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
339 uint32 multisampleCount;
340 SVGA3dTextureFilter autogenFilter;
341 /*
342 * Followed by an SVGA3dSize structure for each mip level in each face.
343 *
344 * A note on surface sizes: Sizes are always specified in pixels,
345 * even if the true surface size is not a multiple of the minimum
346 * block size of the surface's format. For example, a 3x3x1 DXT1
347 * compressed texture would actually be stored as a 4x4x1 image in
348 * memory.
349 */
350}
351#include "vmware_pack_end.h"
352SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
353
354typedef
355#include "vmware_pack_begin.h"
356struct {
357 uint32 sid;
358}
359#include "vmware_pack_end.h"
360SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
361
362typedef
363#include "vmware_pack_begin.h"
364struct {
365 uint32 cid;
366}
367#include "vmware_pack_end.h"
368SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
369
370typedef
371#include "vmware_pack_begin.h"
372struct {
373 uint32 cid;
374}
375#include "vmware_pack_end.h"
376SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
377
378typedef
379#include "vmware_pack_begin.h"
380struct {
381 uint32 cid;
382 SVGA3dClearFlag clearFlag;
383 uint32 color;
384 float depth;
385 uint32 stencil;
 386 /* Followed by a variable number of SVGA3dRect structures */
387}
388#include "vmware_pack_end.h"
389SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
390
391typedef
392#include "vmware_pack_begin.h"
393struct {
394 SVGA3dLightType type;
395 SVGA3dBool inWorldSpace;
396 float diffuse[4];
397 float specular[4];
398 float ambient[4];
399 float position[4];
400 float direction[4];
401 float range;
402 float falloff;
403 float attenuation0;
404 float attenuation1;
405 float attenuation2;
406 float theta;
407 float phi;
408}
409#include "vmware_pack_end.h"
410SVGA3dLightData;
411
412typedef
413#include "vmware_pack_begin.h"
414struct {
415 uint32 sid;
 416 /* Followed by a variable number of SVGA3dCopyRect structures */
417}
418#include "vmware_pack_end.h"
419SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
420
421typedef
422#include "vmware_pack_begin.h"
423struct {
424 SVGA3dRenderStateName state;
425 union {
426 uint32 uintValue;
427 float floatValue;
428 };
429}
430#include "vmware_pack_end.h"
431SVGA3dRenderState;
432
433typedef
434#include "vmware_pack_begin.h"
435struct {
436 uint32 cid;
 437 /* Followed by a variable number of SVGA3dRenderState structures */
438}
439#include "vmware_pack_end.h"
440SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
441
442typedef
443#include "vmware_pack_begin.h"
444struct {
445 uint32 cid;
446 SVGA3dRenderTargetType type;
447 SVGA3dSurfaceImageId target;
448}
449#include "vmware_pack_end.h"
450SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
451
452typedef
453#include "vmware_pack_begin.h"
454struct {
455 SVGA3dSurfaceImageId src;
456 SVGA3dSurfaceImageId dest;
 457 /* Followed by a variable number of SVGA3dCopyBox structures */
458}
459#include "vmware_pack_end.h"
460SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
461
462typedef
463#include "vmware_pack_begin.h"
464struct {
465 SVGA3dSurfaceImageId src;
466 SVGA3dSurfaceImageId dest;
467 SVGA3dBox boxSrc;
468 SVGA3dBox boxDest;
469 SVGA3dStretchBltMode mode;
470}
471#include "vmware_pack_end.h"
472SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
473
474typedef
475#include "vmware_pack_begin.h"
476struct {
477 /*
478 * If the discard flag is present in a surface DMA operation, the host may
479 * discard the contents of the current mipmap level and face of the target
480 * surface before applying the surface DMA contents.
481 */
482 uint32 discard : 1;
483
484 /*
485 * If the unsynchronized flag is present, the host may perform this upload
486 * without syncing to pending reads on this surface.
487 */
488 uint32 unsynchronized : 1;
489
490 /*
491 * Guests *MUST* set the reserved bits to 0 before submitting the command
492 * suffix as future flags may occupy these bits.
493 */
494 uint32 reserved : 30;
495}
496#include "vmware_pack_end.h"
497SVGA3dSurfaceDMAFlags;
498
499typedef
500#include "vmware_pack_begin.h"
501struct {
502 SVGAGuestImage guest;
503 SVGA3dSurfaceImageId host;
504 SVGA3dTransferType transfer;
505 /*
 506 * Followed by a variable number of SVGA3dCopyBox structures. For consistency
507 * in all clipping logic and coordinate translation, we define the
508 * "source" in each copyBox as the guest image and the
509 * "destination" as the host image, regardless of transfer
510 * direction.
511 *
512 * For efficiency, the SVGA3D device is free to copy more data than
513 * specified. For example, it may round copy boxes outwards such
514 * that they lie on particular alignment boundaries.
515 */
516}
517#include "vmware_pack_end.h"
518SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
519
520/*
521 * SVGA3dCmdSurfaceDMASuffix --
522 *
523 * This is a command suffix that will appear after a SurfaceDMA command in
524 * the FIFO. It contains some extra information that hosts may use to
525 * optimize performance or protect the guest. This suffix exists to preserve
526 * backwards compatibility while also allowing for new functionality to be
527 * implemented.
528 */
529
530typedef
531#include "vmware_pack_begin.h"
532struct {
533 uint32 suffixSize;
534
535 /*
536 * maximumOffset bounds the largest offset from the guestPtr base
537 * address that will be accessed or written to during this
538 * surfaceDMA. If the suffix is supported, the host will respect this
539 * boundary while performing surface DMAs.
540 *
541 * Defaults to MAX_UINT32
542 */
543 uint32 maximumOffset;
544
545 /*
546 * A set of flags that describes optimizations that the host may perform
547 * while performing this surface DMA operation. For correctness, the guest
548 * must never rely on behaviour that differs when these flags are set.
549 *
550 * Defaults to 0
551 */
552 SVGA3dSurfaceDMAFlags flags;
553}
554#include "vmware_pack_end.h"
555SVGA3dCmdSurfaceDMASuffix;
556
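/*
 * Usage sketch (illustrative, not part of the device protocol headers):
 * encoding a SURFACE_DMA command followed by one copy box and the suffix.
 * SVGA3dCmdHeader and SVGA3dCopyBox come from the companion headers; the
 * fifo_reserve()/fifo_commit() helpers are assumptions of this sketch.
 */
extern void *fifo_reserve(uint32 bytes);   /* assumed helper */
extern void fifo_commit(uint32 bytes);     /* assumed helper */

static void
sketch_surface_dma_to_host(SVGAGuestPtr guestPtr, SVGA3dSurfaceImageId host,
                           const SVGA3dCopyBox *box)
{
   const uint32 totalSize = sizeof(SVGA3dCmdHeader) +
                            sizeof(SVGA3dCmdSurfaceDMA) +
                            sizeof(SVGA3dCopyBox) +
                            sizeof(SVGA3dCmdSurfaceDMASuffix);
   SVGA3dCmdHeader *header = fifo_reserve(totalSize);
   SVGA3dCmdSurfaceDMA *body = (SVGA3dCmdSurfaceDMA *) (header + 1);
   SVGA3dCopyBox *boxes = (SVGA3dCopyBox *) (body + 1);
   SVGA3dCmdSurfaceDMASuffix *suffix =
      (SVGA3dCmdSurfaceDMASuffix *) (boxes + 1);

   header->id = SVGA_3D_CMD_SURFACE_DMA;
   header->size = totalSize - sizeof(SVGA3dCmdHeader);

   body->guest.ptr = guestPtr;
   body->guest.pitch = 0;
   body->host = host;
   body->transfer = SVGA3D_WRITE_HOST_VRAM;
   boxes[0] = *box;

   suffix->suffixSize = sizeof *suffix;
   suffix->maximumOffset = MAX_UINT32;    /* default: no extra bound */
   suffix->flags.discard = 0;
   suffix->flags.unsynchronized = 0;
   suffix->flags.reserved = 0;            /* reserved bits must be zero */

   fifo_commit(totalSize);
}
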
557/*
558 * SVGA_3D_CMD_DRAW_PRIMITIVES --
559 *
560 * This command is the SVGA3D device's generic drawing entry point.
561 * It can draw multiple ranges of primitives, optionally using an
562 * index buffer, using an arbitrary collection of vertex buffers.
563 *
564 * Each SVGA3dVertexDecl defines a distinct vertex array to bind
565 * during this draw call. The declarations specify which surface
566 * the vertex data lives in, what that vertex data is used for,
567 * and how to interpret it.
568 *
569 * Each SVGA3dPrimitiveRange defines a collection of primitives
570 * to render using the same vertex arrays. An index buffer is
571 * optional.
572 */
573
574typedef
575#include "vmware_pack_begin.h"
576struct {
577 /*
578 * A range hint is an optional specification for the range of indices
579 * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
580 * that the entire array will be used.
581 *
582 * These are only hints. The SVGA3D device may use them for
583 * performance optimization if possible, but it's also allowed to
584 * ignore these values.
585 */
586 uint32 first;
587 uint32 last;
588}
589#include "vmware_pack_end.h"
590SVGA3dArrayRangeHint;
591
592typedef
593#include "vmware_pack_begin.h"
594struct {
595 /*
596 * Define the origin and shape of a vertex or index array. Both
597 * 'offset' and 'stride' are in bytes. The provided surface will be
598 * reinterpreted as a flat array of bytes in the same format used
599 * by surface DMA operations. To avoid unnecessary conversions, the
600 * surface should be created with the SVGA3D_BUFFER format.
601 *
602 * Index 0 in the array starts 'offset' bytes into the surface.
603 * Index 1 begins at byte 'offset + stride', etc. Array indices may
604 * not be negative.
605 */
606 uint32 surfaceId;
607 uint32 offset;
608 uint32 stride;
609}
610#include "vmware_pack_end.h"
611SVGA3dArray;
612
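/*
 * Sketch of the layout described above: the byte offset of array element
 * i within the backing surface. Illustrative only.
 */
static uint32
sketch_array_element_offset(const SVGA3dArray *array, uint32 i)
{
   return array->offset + i * array->stride;
}
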
613typedef
614#include "vmware_pack_begin.h"
615struct {
616 /*
617 * Describe a vertex array's data type, and define how it is to be
618 * used by the fixed function pipeline or the vertex shader. It
619 * isn't useful to have two VertexDecls with the same
620 * VertexArrayIdentity in one draw call.
621 */
622 SVGA3dDeclType type;
623 SVGA3dDeclMethod method;
624 SVGA3dDeclUsage usage;
625 uint32 usageIndex;
626}
627#include "vmware_pack_end.h"
628SVGA3dVertexArrayIdentity;
629
630typedef
631#include "vmware_pack_begin.h"
632struct SVGA3dVertexDecl {
633 SVGA3dVertexArrayIdentity identity;
634 SVGA3dArray array;
635 SVGA3dArrayRangeHint rangeHint;
636}
637#include "vmware_pack_end.h"
638SVGA3dVertexDecl;
639
640typedef
641#include "vmware_pack_begin.h"
642struct SVGA3dPrimitiveRange {
643 /*
644 * Define a group of primitives to render, from sequential indices.
645 *
646 * The values of 'primType' and 'primitiveCount' imply the
647 * total number of vertices that will be rendered.
648 */
649 SVGA3dPrimitiveType primType;
650 uint32 primitiveCount;
651
652 /*
653 * Optional index buffer. If indexArray.surfaceId is
654 * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
655 * without an index buffer is identical to rendering with an index
656 * buffer containing the sequence [0, 1, 2, 3, ...].
657 *
658 * If an index buffer is in use, indexWidth specifies the width in
659 * bytes of each index value. It must be less than or equal to
660 * indexArray.stride.
661 *
662 * (Currently, the SVGA3D device requires index buffers to be tightly
663 * packed. In other words, indexWidth == indexArray.stride)
664 */
665 SVGA3dArray indexArray;
666 uint32 indexWidth;
667
668 /*
669 * Optional index bias. This number is added to all indices from
670 * indexArray before they are used as vertex array indices. This
671 * can be used in multiple ways:
672 *
673 * - When not using an indexArray, this bias can be used to
674 * specify where in the vertex arrays to begin rendering.
675 *
676 * - A positive number here is equivalent to increasing the
677 * offset in each vertex array.
678 *
679 * - A negative number can be used to render using a small
680 * vertex array and an index buffer that contains large
681 * values. This may be used by some applications that
682 * crop a vertex buffer without modifying their index
683 * buffer.
684 *
685 * Note that rendering with a negative bias value may be slower and
686 * use more memory than rendering with a positive or zero bias.
687 */
688 int32 indexBias;
689}
690#include "vmware_pack_end.h"
691SVGA3dPrimitiveRange;
692
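/*
 * Sketch of the indexing rules above: the effective vertex array index
 * for element i of a range. fetch_index() stands in for reading an
 * indexWidth-byte value from indexArray and is an assumption of this
 * sketch.
 */
extern uint32 fetch_index(const SVGA3dArray *indexArray, uint32 i);

static int32
sketch_effective_vertex_index(const SVGA3dPrimitiveRange *range, uint32 i)
{
   /* Without an index buffer, the sequence [0, 1, 2, ...] is implied. */
   uint32 rawIndex = (range->indexArray.surfaceId == SVGA3D_INVALID_ID) ?
                     i : fetch_index(&range->indexArray, i);

   return (int32) rawIndex + range->indexBias;
}
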
693typedef
694#include "vmware_pack_begin.h"
695struct {
696 uint32 cid;
697 uint32 numVertexDecls;
698 uint32 numRanges;
699
700 /*
701 * The SVGA3dCmdDrawPrimitives structure is followed by two
702 * variable size arrays, plus an optional third. In order,
703 * they are:
704 *
705 * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
706 * SVGA3D_MAX_VERTEX_ARRAYS;
707 * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
708 * SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
709 * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
710 * the frequency divisor for the corresponding vertex decl).
711 */
712}
713#include "vmware_pack_end.h"
714SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */
715
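/*
 * Sketch: total command body size for DRAW_PRIMITIVES with the two
 * required trailing arrays (the optional divisor array is omitted).
 * Illustrative only.
 */
static uint32
sketch_draw_primitives_size(uint32 numVertexDecls, uint32 numRanges)
{
   return sizeof(SVGA3dCmdDrawPrimitives) +
          numVertexDecls * sizeof(SVGA3dVertexDecl) +
          numRanges * sizeof(SVGA3dPrimitiveRange);
}
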
716typedef
717#include "vmware_pack_begin.h"
718struct {
719 uint32 cid;
720
721 uint32 primitiveCount; /* How many primitives to render */
722 uint32 startVertexLocation; /* Which vertex to start rendering at */
723
724 uint8 primitiveType; /* SVGA3dPrimitiveType */
725 uint8 padding[3];
726}
727#include "vmware_pack_end.h"
728SVGA3dCmdDraw;
729
730typedef
731#include "vmware_pack_begin.h"
732struct {
733 uint32 cid;
734
735 uint8 primitiveType; /* SVGA3dPrimitiveType */
736
737 uint32 indexBufferSid; /* Valid index buffer sid. */
738 uint32 indexBufferOffset; /* Byte offset into the index buffer, almost */
739 /* always 0 for DX9 guests, non-zero for OpenGL */
740 /* guests. We can't represent non-multiple of */
741 /* stride offsets in D3D9Renderer... */
742 uint8 indexBufferStride; /* Allowable values = 1, 2, or 4 */
743
744 int32 baseVertexLocation; /* Bias applied to the index when selecting a */
745 /* vertex from the streams, may be negative */
746
747 uint32 primitiveCount; /* How many primitives to render */
748 uint32 pad0;
749 uint16 pad1;
750}
751#include "vmware_pack_end.h"
752SVGA3dCmdDrawIndexed;
753
754typedef
755#include "vmware_pack_begin.h"
756struct {
757 /*
758 * Describe a vertex array's data type, and define how it is to be
759 * used by the fixed function pipeline or the vertex shader. It
760 * isn't useful to have two VertexDecls with the same
761 * VertexArrayIdentity in one draw call.
762 */
763 uint16 streamOffset;
764 uint8 stream;
765 uint8 type; /* SVGA3dDeclType */
766 uint8 method; /* SVGA3dDeclMethod */
767 uint8 usage; /* SVGA3dDeclUsage */
768 uint8 usageIndex;
769 uint8 padding;
770
771}
772#include "vmware_pack_end.h"
773SVGA3dVertexElement;
774
775typedef
776#include "vmware_pack_begin.h"
777struct {
778 uint32 cid;
779
780 uint32 numElements;
781
782 /*
783 * Followed by numElements SVGA3dVertexElement structures.
784 *
785 * If numElements < SVGA3D_MAX_VERTEX_ARRAYS, the remaining elements
786 * are cleared and will not be used by following draws.
787 */
788}
789#include "vmware_pack_end.h"
790SVGA3dCmdSetVertexDecls;
791
792typedef
793#include "vmware_pack_begin.h"
794struct {
795 uint32 sid;
796 uint32 stride;
797 uint32 offset;
798}
799#include "vmware_pack_end.h"
800SVGA3dVertexStream;
801
802typedef
803#include "vmware_pack_begin.h"
804struct {
805 uint32 cid;
806
807 uint32 numStreams;
808 /*
809 * Followed by numStreams SVGA3dVertexStream structures.
810 *
811 * If numStreams < SVGA3D_MAX_VERTEX_ARRAYS, the remaining streams
812 * are cleared and will not be used by following draws.
813 */
814}
815#include "vmware_pack_end.h"
816SVGA3dCmdSetVertexStreams;
817
818typedef
819#include "vmware_pack_begin.h"
820struct {
821 uint32 cid;
822 uint32 numDivisors;
823}
824#include "vmware_pack_end.h"
825SVGA3dCmdSetVertexDivisors;
826
827typedef
828#include "vmware_pack_begin.h"
829struct {
830 uint32 stage;
831 SVGA3dTextureStateName name;
832 union {
833 uint32 value;
834 float floatValue;
835 };
836}
837#include "vmware_pack_end.h"
838SVGA3dTextureState;
839
840typedef
841#include "vmware_pack_begin.h"
842struct {
843 uint32 cid;
844 /* Followed by variable number of SVGA3dTextureState structures */
845}
846#include "vmware_pack_end.h"
847SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
848
849typedef
850#include "vmware_pack_begin.h"
851struct {
852 uint32 cid;
853 SVGA3dTransformType type;
854 float matrix[16];
855}
856#include "vmware_pack_end.h"
857SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
858
859typedef
860#include "vmware_pack_begin.h"
861struct {
862 float min;
863 float max;
864}
865#include "vmware_pack_end.h"
866SVGA3dZRange;
867
868typedef
869#include "vmware_pack_begin.h"
870struct {
871 uint32 cid;
872 SVGA3dZRange zRange;
873}
874#include "vmware_pack_end.h"
875SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
876
877typedef
878#include "vmware_pack_begin.h"
879struct {
880 float diffuse[4];
881 float ambient[4];
882 float specular[4];
883 float emissive[4];
884 float shininess;
885}
886#include "vmware_pack_end.h"
887SVGA3dMaterial;
888
889typedef
890#include "vmware_pack_begin.h"
891struct {
892 uint32 cid;
893 SVGA3dFace face;
894 SVGA3dMaterial material;
895}
896#include "vmware_pack_end.h"
897SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
898
899typedef
900#include "vmware_pack_begin.h"
901struct {
902 uint32 cid;
903 uint32 index;
904 SVGA3dLightData data;
905}
906#include "vmware_pack_end.h"
907SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
908
909typedef
910#include "vmware_pack_begin.h"
911struct {
912 uint32 cid;
913 uint32 index;
914 uint32 enabled;
915}
916#include "vmware_pack_end.h"
917SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
918
919typedef
920#include "vmware_pack_begin.h"
921struct {
922 uint32 cid;
923 SVGA3dRect rect;
924}
925#include "vmware_pack_end.h"
926SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
927
928typedef
929#include "vmware_pack_begin.h"
930struct {
931 uint32 cid;
932 SVGA3dRect rect;
933}
934#include "vmware_pack_end.h"
935SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
936
937typedef
938#include "vmware_pack_begin.h"
939struct {
940 uint32 cid;
941 uint32 index;
942 float plane[4];
943}
944#include "vmware_pack_end.h"
945SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
946
947typedef
948#include "vmware_pack_begin.h"
949struct {
950 uint32 cid;
951 uint32 shid;
952 SVGA3dShaderType type;
953 /* Followed by variable number of DWORDs for shader bytecode */
954}
955#include "vmware_pack_end.h"
956SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
957
958typedef
959#include "vmware_pack_begin.h"
960struct {
961 uint32 cid;
962 uint32 shid;
963 SVGA3dShaderType type;
964}
965#include "vmware_pack_end.h"
966SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
967
968typedef
969#include "vmware_pack_begin.h"
970struct {
971 uint32 cid;
972 uint32 reg; /* register number */
973 SVGA3dShaderType type;
974 SVGA3dShaderConstType ctype;
975 uint32 values[4];
976
977 /*
978 * Followed by a variable number of additional values.
979 */
980}
981#include "vmware_pack_end.h"
982SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
983
984typedef
985#include "vmware_pack_begin.h"
986struct {
987 uint32 cid;
988 SVGA3dShaderType type;
989 uint32 shid;
990}
991#include "vmware_pack_end.h"
992SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
993
994typedef
995#include "vmware_pack_begin.h"
996struct {
997 uint32 cid;
998 SVGA3dQueryType type;
999}
1000#include "vmware_pack_end.h"
1001SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
1002
1003typedef
1004#include "vmware_pack_begin.h"
1005struct {
1006 uint32 cid;
1007 SVGA3dQueryType type;
1008 SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
1009}
1010#include "vmware_pack_end.h"
1011SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
1012
1013
1014/*
1015 * SVGA3D_CMD_WAIT_FOR_QUERY --
1016 *
1017 * Reads the SVGA3dQueryResult structure pointed to by guestResult;
1018 * if the state member is set to anything other than
1019 * SVGA3D_QUERYSTATE_PENDING, this command is a no-op.
1020 *
1021 * Otherwise, in addition to the query explicitly waited for,
1022 * all queries of the same type issued with the same cid, for which
1023 * an SVGA_3D_CMD_END_QUERY command has previously been sent, will
1024 * be finished after execution of this command.
1025 *
1026 * A query will be identified by the gmrId and offset of the guestResult
1027 * member. If the device can't find an SVGA_3D_CMD_END_QUERY that has
1028 * been sent previously with an identical gmrId and offset, it will
1029 * effectively end all queries with an identical type issued with the
1030 * same cid, and the SVGA3dQueryResult structure pointed to by
1031 * guestResult will not be written to. This property can be used to
1032 * implement a query barrier for a given cid and query type.
1033 */
1034
1035typedef
1036#include "vmware_pack_begin.h"
1037struct {
1038 uint32 cid; /* Same parameters passed to END_QUERY */
1039 SVGA3dQueryType type;
1040 SVGAGuestPtr guestResult;
1041}
1042#include "vmware_pack_end.h"
1043SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
1044
1045typedef
1046#include "vmware_pack_begin.h"
1047struct {
1048 uint32 totalSize; /* Set by guest before query is ended. */
1049 SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
1050 union { /* Set by host on exit from PENDING state */
1051 uint32 result32;
1052 uint32 queryCookie; /* May be used to identify which QueryGetData this
1053 result corresponds to. */
1054 };
1055}
1056#include "vmware_pack_end.h"
1057SVGA3dQueryResult;
1058
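/*
 * Sketch: guest-side check of a query result. The guest initializes
 * state to SVGA3D_QUERYSTATE_PENDING before ending the query; the host
 * overwrites it on completion, hence the volatile read. Illustrative
 * only.
 */
static Bool
sketch_query_done(const volatile SVGA3dQueryResult *result)
{
   return result->state != SVGA3D_QUERYSTATE_PENDING;
}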
1059
1060/*
1061 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
1062 *
1063 * This is a blit from an SVGA3D surface to a Screen Object.
1064 * This blit must be directed at a specific screen.
1065 *
1066 * The blit copies from a rectangular region of an SVGA3D surface
1067 * image to a rectangular region of a screen.
1068 *
1069 * This command takes an optional variable-length list of clipping
1070 * rectangles after the body of the command. If no rectangles are
1071 * specified, there is no clipping region. The entire destRect is
1072 * drawn to. If one or more rectangles are included, they describe
1073 * a clipping region. The clip rectangle coordinates are measured
1074 * relative to the top-left corner of destRect.
1075 *
1076 * The srcImage must be from mip=0 face=0.
1077 *
1078 * This supports scaling if the src and dest are of different sizes.
1079 *
1080 * Availability:
1081 * SVGA_FIFO_CAP_SCREEN_OBJECT
1082 */
1083
1084typedef
1085#include "vmware_pack_begin.h"
1086struct {
1087 SVGA3dSurfaceImageId srcImage;
1088 SVGASignedRect srcRect;
1089 uint32 destScreenId; /* Screen Object ID */
1090 SVGASignedRect destRect;
1091 /* Clipping: zero or more SVGASignedRects follow */
1092}
1093#include "vmware_pack_end.h"
1094SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
1095
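/*
 * Sketch: clip rectangles trail the command body and are measured from
 * destRect's top-left corner, so a clip covering all of destRect is
 * simply its extent at the origin. Illustrative only.
 */
static SVGASignedRect
sketch_full_clip_rect(const SVGASignedRect *destRect)
{
   SVGASignedRect clip;

   clip.left = 0;
   clip.top = 0;
   clip.right = destRect->right - destRect->left;
   clip.bottom = destRect->bottom - destRect->top;
   return clip;
}
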
1096typedef
1097#include "vmware_pack_begin.h"
1098struct {
1099 uint32 sid;
1100 SVGA3dTextureFilter filter;
1101}
1102#include "vmware_pack_end.h"
1103SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
1104
1105
1106
1107typedef
1108#include "vmware_pack_begin.h"
1109struct {
1110 uint32 sid;
1111}
1112#include "vmware_pack_end.h"
1113SVGA3dCmdActivateSurface; /* SVGA_3D_CMD_ACTIVATE_SURFACE */
1114
1115typedef
1116#include "vmware_pack_begin.h"
1117struct {
1118 uint32 sid;
1119}
1120#include "vmware_pack_end.h"
1121SVGA3dCmdDeactivateSurface; /* SVGA_3D_CMD_DEACTIVATE_SURFACE */
1122
1123/*
1124 * Screen DMA command
1125 *
1126 * Available with SVGA_FIFO_CAP_SCREEN_OBJECT_2. The SVGA_CAP_3D device
1127 * cap bit is not required.
1128 *
1129 * - refBuffer and destBuffer are 32-bit BGRX; they may be different
1130 * buffers, but the guest must ensure that refBuffer holds exactly
1131 * the contents that had been written when the host last received a
1132 * screen DMA command.
1133 *
1134 * - changeMap is generated by lib/blit and contains at least the
1135 * changes since the last screen DMA received by the host.
1136 */
1137
1138typedef
1139#include "vmware_pack_begin.h"
1140struct SVGA3dCmdScreenDMA {
1141 uint32 screenId;
1142 SVGAGuestImage refBuffer;
1143 SVGAGuestImage destBuffer;
1144 SVGAGuestImage changeMap;
1145}
1146#include "vmware_pack_end.h"
1147SVGA3dCmdScreenDMA; /* SVGA_3D_CMD_SCREEN_DMA */
1148
1149/*
1150 * Set Unity Surface Cookie
1151 *
1152 * Associates the supplied cookie with the surface id for use with
1153 * Unity. This cookie is a hint from guest to host; there is no way
1154 * for the guest to read back the cookie, and the host is free to drop
1155 * the cookie association at will. The default value for the cookie
1156 * on all surfaces is 0.
1157 */
1158
1159typedef
1160#include "vmware_pack_begin.h"
1161struct SVGA3dCmdSetUnitySurfaceCookie {
1162 uint32 sid;
1163 uint64 cookie;
1164}
1165#include "vmware_pack_end.h"
1166SVGA3dCmdSetUnitySurfaceCookie; /* SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE */
1167
1168/*
1169 * Open a context-specific surface in a non-context-specific manner.
1170 */
1171
1172typedef
1173#include "vmware_pack_begin.h"
1174struct SVGA3dCmdOpenContextSurface {
1175 uint32 sid;
1176}
1177#include "vmware_pack_end.h"
1178SVGA3dCmdOpenContextSurface; /* SVGA_3D_CMD_OPEN_CONTEXT_SURFACE */
1179
1180
1181/*
1182 * Logic ops
1183 */
1184
1185#define SVGA3D_LOTRANSBLT_HONORALPHA (0x01)
1186#define SVGA3D_LOSTRETCHBLT_MIRRORX (0x01)
1187#define SVGA3D_LOSTRETCHBLT_MIRRORY (0x02)
1188#define SVGA3D_LOALPHABLEND_SRCHASALPHA (0x01)
1189
1190typedef
1191#include "vmware_pack_begin.h"
1192struct SVGA3dCmdLogicOpsBitBlt {
1193 /*
1194 * All LogicOps surfaces are one-level
1195 * surfaces so mipmap & face should always
1196 * be zero.
1197 */
1198 SVGA3dSurfaceImageId src;
1199 SVGA3dSurfaceImageId dst;
1200 SVGA3dLogicOp logicOp;
1201 /* Followed by variable number of SVGA3dCopyBox structures */
1202}
1203#include "vmware_pack_end.h"
1204SVGA3dCmdLogicOpsBitBlt; /* SVGA_3D_CMD_LOGICOPS_BITBLT */
1205
1206
1207typedef
1208#include "vmware_pack_begin.h"
1209struct SVGA3dCmdLogicOpsTransBlt {
1210 /*
1211 * All LogicOps surfaces are one-level
1212 * surfaces so mipmap & face should always
1213 * be zero.
1214 */
1215 SVGA3dSurfaceImageId src;
1216 SVGA3dSurfaceImageId dst;
1217 uint32 color;
1218 uint32 flags;
1219 SVGA3dBox srcBox;
1220 SVGA3dBox dstBox;
1221}
1222#include "vmware_pack_end.h"
1223SVGA3dCmdLogicOpsTransBlt; /* SVGA_3D_CMD_LOGICOPS_TRANSBLT */
1224
1225
1226typedef
1227#include "vmware_pack_begin.h"
1228struct SVGA3dCmdLogicOpsStretchBlt {
1229 /*
1230 * All LogicOps surfaces are one-level
1231 * surfaces so mipmap & face should always
1232 * be zero.
1233 */
1234 SVGA3dSurfaceImageId src;
1235 SVGA3dSurfaceImageId dst;
1236 uint16 mode;
1237 uint16 flags;
1238 SVGA3dBox srcBox;
1239 SVGA3dBox dstBox;
1240}
1241#include "vmware_pack_end.h"
1242SVGA3dCmdLogicOpsStretchBlt; /* SVGA_3D_CMD_LOGICOPS_STRETCHBLT */
1243
1244
1245typedef
1246#include "vmware_pack_begin.h"
1247struct SVGA3dCmdLogicOpsColorFill {
1248 /*
1249 * All LogicOps surfaces are one-level
1250 * surfaces so mipmap & face should always
1251 * be zero.
1252 */
1253 SVGA3dSurfaceImageId dst;
1254 uint32 color;
1255 SVGA3dLogicOp logicOp;
1256 /* Followed by variable number of SVGA3dRect structures. */
1257}
1258#include "vmware_pack_end.h"
1259SVGA3dCmdLogicOpsColorFill; /* SVGA_3D_CMD_LOGICOPS_COLORFILL */
1260
1261
1262typedef
1263#include "vmware_pack_begin.h"
1264struct SVGA3dCmdLogicOpsAlphaBlend {
1265 /*
1266 * All LogicOps surfaces are one-level
1267 * surfaces so mipmap & face should always
1268 * be zero.
1269 */
1270 SVGA3dSurfaceImageId src;
1271 SVGA3dSurfaceImageId dst;
1272 uint32 alphaVal;
1273 uint32 flags;
1274 SVGA3dBox srcBox;
1275 SVGA3dBox dstBox;
1276}
1277#include "vmware_pack_end.h"
1278SVGA3dCmdLogicOpsAlphaBlend; /* SVGA_3D_CMD_LOGICOPS_ALPHABLEND */
1279
1280#define SVGA3D_CLEARTYPE_INVALID_GAMMA_INDEX 0xFFFFFFFF
1281
1282#define SVGA3D_CLEARTYPE_GAMMA_WIDTH 512
1283#define SVGA3D_CLEARTYPE_GAMMA_HEIGHT 16
1284
1285typedef
1286#include "vmware_pack_begin.h"
1287struct SVGA3dCmdLogicOpsClearTypeBlend {
1288 /*
1289 * All LogicOps surfaces are one-level
1290 * surfaces so mipmap & face should always
1291 * be zero.
1292 */
1293 SVGA3dSurfaceImageId tmp;
1294 SVGA3dSurfaceImageId dst;
1295 SVGA3dSurfaceImageId gammaSurf;
1296 SVGA3dSurfaceImageId alphaSurf;
1297 uint32 gamma;
1298 uint32 color;
1299 uint32 color2;
1300 int32 alphaOffsetX;
1301 int32 alphaOffsetY;
1302 /* Followed by variable number of SVGA3dBox structures */
1303}
1304#include "vmware_pack_end.h"
1305SVGA3dCmdLogicOpsClearTypeBlend; /* SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND */
1306
1307
1308/*
1309 * Guest-backed objects definitions.
1310 */
1311
1312typedef
1313#include "vmware_pack_begin.h"
1314struct {
1315 SVGAMobFormat ptDepth;
1316 uint32 sizeInBytes;
1317 PPN64 base;
1318}
1319#include "vmware_pack_end.h"
1320SVGAOTableMobEntry;
1321#define SVGA3D_OTABLE_MOB_ENTRY_SIZE (sizeof(SVGAOTableMobEntry))
1322
1323typedef
1324#include "vmware_pack_begin.h"
1325struct {
1326 SVGA3dSurfaceFormat format;
1327 SVGA3dSurfaceFlags surfaceFlags;
1328 uint32 numMipLevels;
1329 uint32 multisampleCount;
1330 SVGA3dTextureFilter autogenFilter;
1331 SVGA3dSize size;
1332 SVGAMobId mobid;
1333 uint32 arraySize;
1334 uint32 mobPitch;
1335 uint32 pad[5];
1336}
1337#include "vmware_pack_end.h"
1338SVGAOTableSurfaceEntry;
1339#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE (sizeof(SVGAOTableSurfaceEntry))
1340
1341typedef
1342#include "vmware_pack_begin.h"
1343struct {
1344 uint32 cid;
1345 SVGAMobId mobid;
1346}
1347#include "vmware_pack_end.h"
1348SVGAOTableContextEntry;
1349#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE (sizeof(SVGAOTableContextEntry))
1350
1351typedef
1352#include "vmware_pack_begin.h"
1353struct {
1354 SVGA3dShaderType type;
1355 uint32 sizeInBytes;
1356 uint32 offsetInBytes;
1357 SVGAMobId mobid;
1358}
1359#include "vmware_pack_end.h"
1360SVGAOTableShaderEntry;
1361#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry))
1362
1363#define SVGA_STFLAG_PRIMARY (1 << 0)
1364typedef uint32 SVGAScreenTargetFlags;
1365
1366typedef
1367#include "vmware_pack_begin.h"
1368struct {
1369 SVGA3dSurfaceImageId image;
1370 uint32 width;
1371 uint32 height;
1372 int32 xRoot;
1373 int32 yRoot;
1374 SVGAScreenTargetFlags flags;
1375 uint32 dpi;
1376 uint32 pad[7];
1377}
1378#include "vmware_pack_end.h"
1379SVGAOTableScreenTargetEntry;
1380#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE \
1381 (sizeof(SVGAOTableScreenTargetEntry))
1382
1383typedef
1384#include "vmware_pack_begin.h"
1385struct {
1386 float value[4];
1387}
1388#include "vmware_pack_end.h"
1389SVGA3dShaderConstFloat;
1390
1391typedef
1392#include "vmware_pack_begin.h"
1393struct {
1394 int32 value[4];
1395}
1396#include "vmware_pack_end.h"
1397SVGA3dShaderConstInt;
1398
1399typedef
1400#include "vmware_pack_begin.h"
1401struct {
1402 uint32 value;
1403}
1404#include "vmware_pack_end.h"
1405SVGA3dShaderConstBool;
1406
1407typedef
1408#include "vmware_pack_begin.h"
1409struct {
1410 uint16 streamOffset;
1411 uint8 stream;
1412 uint8 type;
1413 uint8 methodUsage;
1414 uint8 usageIndex;
1415}
1416#include "vmware_pack_end.h"
1417SVGAGBVertexElement;
1418
1419typedef
1420#include "vmware_pack_begin.h"
1421struct {
1422 uint32 sid;
1423 uint16 stride;
1424 uint32 offset;
1425}
1426#include "vmware_pack_end.h"
1427SVGAGBVertexStream;
1428typedef
1429#include "vmware_pack_begin.h"
1430struct {
1431 SVGA3dRect viewport;
1432 SVGA3dRect scissorRect;
1433 SVGA3dZRange zRange;
1434
1435 SVGA3dSurfaceImageId renderTargets[SVGA3D_RT_MAX];
1436 SVGAGBVertexElement decl1[4];
1437
1438 uint32 renderStates[SVGA3D_RS_MAX];
1439 SVGAGBVertexElement decl2[18];
1440 uint32 pad0[2];
1441
1442 struct {
1443 SVGA3dFace face;
1444 SVGA3dMaterial material;
1445 } material;
1446
1447 float clipPlanes[SVGA3D_NUM_CLIPPLANES][4];
1448 float matrices[SVGA3D_TRANSFORM_MAX][16];
1449
1450 SVGA3dBool lightEnabled[SVGA3D_NUM_LIGHTS];
1451 SVGA3dLightData lightData[SVGA3D_NUM_LIGHTS];
1452
1453 /*
1454 * Shaders currently bound
1455 */
1456 uint32 shaders[SVGA3D_NUM_SHADERTYPE_PREDX];
1457 SVGAGBVertexElement decl3[10];
1458 uint32 pad1[3];
1459
1460 uint32 occQueryActive;
1461 uint32 occQueryValue;
1462
1463 /*
1464 * Int/Bool Shader constants
1465 */
1466 SVGA3dShaderConstInt pShaderIValues[SVGA3D_CONSTINTREG_MAX];
1467 SVGA3dShaderConstInt vShaderIValues[SVGA3D_CONSTINTREG_MAX];
1468 uint16 pShaderBValues;
1469 uint16 vShaderBValues;
1470
1471
1472 SVGAGBVertexStream streams[SVGA3D_MAX_VERTEX_ARRAYS];
1473 SVGA3dVertexDivisor divisors[SVGA3D_MAX_VERTEX_ARRAYS];
1474 uint32 numVertexDecls;
1475 uint32 numVertexStreams;
1476 uint32 numVertexDivisors;
1477 uint32 pad2[30];
1478
1479 /*
1480 * Texture Stages
1481 *
1482 * SVGA3D_TS_INVALID through SVGA3D_TS_CONSTANT are in the
1483 * textureStages array.
1484 * SVGA3D_TS_COLOR_KEY is in tsColorKey.
1485 */
1486 uint32 tsColorKey[SVGA3D_NUM_TEXTURE_UNITS];
1487 uint32 textureStages[SVGA3D_NUM_TEXTURE_UNITS][SVGA3D_TS_CONSTANT + 1];
1488 uint32 tsColorKeyEnable[SVGA3D_NUM_TEXTURE_UNITS];
1489
1490 /*
1491 * Float Shader constants.
1492 */
1493 SVGA3dShaderConstFloat pShaderFValues[SVGA3D_CONSTREG_MAX];
1494 SVGA3dShaderConstFloat vShaderFValues[SVGA3D_CONSTREG_MAX];
1495}
1496#include "vmware_pack_end.h"
1497SVGAGBContextData;
1498#define SVGA3D_CONTEXT_DATA_SIZE (sizeof(SVGAGBContextData))
1499
1500/*
1501 * SVGA3dCmdSetOTableBase --
1502 *
1503 * This command allows the guest to specify the base PPN of the
1504 * specified object table.
1505 */
1506
1507typedef
1508#include "vmware_pack_begin.h"
1509struct {
1510 SVGAOTableType type;
1511 PPN baseAddress;
1512 uint32 sizeInBytes;
1513 uint32 validSizeInBytes;
1514 SVGAMobFormat ptDepth;
1515}
1516#include "vmware_pack_end.h"
1517SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
1518
1519typedef
1520#include "vmware_pack_begin.h"
1521struct {
1522 SVGAOTableType type;
1523 PPN64 baseAddress;
1524 uint32 sizeInBytes;
1525 uint32 validSizeInBytes;
1526 SVGAMobFormat ptDepth;
1527}
1528#include "vmware_pack_end.h"
1529SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
1530
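/*
 * Sketch: a base PPN that does not fit in 32 bits requires the 64-bit
 * variant of the command. Illustrative only.
 */
static uint32
sketch_set_otable_cmd_id(PPN64 baseAddress)
{
   return (baseAddress > 0xFFFFFFFFULL) ?
          SVGA_3D_CMD_SET_OTABLE_BASE64 : SVGA_3D_CMD_SET_OTABLE_BASE;
}
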
1531typedef
1532#include "vmware_pack_begin.h"
1533struct {
1534 SVGAOTableType type;
1535}
1536#include "vmware_pack_end.h"
1537SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
1538
1539/*
1540 * Define a memory object (Mob) in the OTable.
1541 */
1542
1543typedef
1544#include "vmware_pack_begin.h"
1545struct SVGA3dCmdDefineGBMob {
1546 SVGAMobId mobid;
1547 SVGAMobFormat ptDepth;
1548 PPN base;
1549 uint32 sizeInBytes;
1550}
1551#include "vmware_pack_end.h"
1552SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
1553
1554
1555/*
1556 * Destroys an object in the OTable.
1557 */
1558
1559typedef
1560#include "vmware_pack_begin.h"
1561struct SVGA3dCmdDestroyGBMob {
1562 SVGAMobId mobid;
1563}
1564#include "vmware_pack_end.h"
1565SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
1566
1567
1568/*
1569 * Define a memory object (Mob) in the OTable with a PPN64 base.
1570 */
1571
1572typedef
1573#include "vmware_pack_begin.h"
1574struct SVGA3dCmdDefineGBMob64 {
1575 SVGAMobId mobid;
1576 SVGAMobFormat ptDepth;
1577 PPN64 base;
1578 uint32 sizeInBytes;
1579}
1580#include "vmware_pack_end.h"
1581SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
1582
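/*
 * Sketch: choosing a 64-bit page-table depth from the mob size, assuming
 * 4 KiB pages so that one page-table page holds PAGE_SIZE / sizeof(PPN64)
 * entries. PAGE_SIZE and the SVGA3D_MOBFMT_PTDEPTH64_* values come from
 * outside this header and are assumptions of this sketch.
 */
static SVGAMobFormat
sketch_mob_ptdepth64(uint32 sizeInBytes)
{
   const uint32 numPages = (sizeInBytes + PAGE_SIZE - 1) / PAGE_SIZE;

   if (numPages == 1)
      return SVGA3D_MOBFMT_PTDEPTH64_0;   /* base points at the data page */
   if (numPages <= PAGE_SIZE / sizeof(PPN64))
      return SVGA3D_MOBFMT_PTDEPTH64_1;   /* one page-table level */
   return SVGA3D_MOBFMT_PTDEPTH64_2;      /* two page-table levels */
}
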
1583/*
1584 * Redefine an object in the OTable with PPN64 base.
1585 */
1586
1587typedef
1588#include "vmware_pack_begin.h"
1589struct SVGA3dCmdRedefineGBMob64 {
1590 SVGAMobId mobid;
1591 SVGAMobFormat ptDepth;
1592 PPN64 base;
1593 uint32 sizeInBytes;
1594}
1595#include "vmware_pack_end.h"
1596SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
1597
1598/*
1599 * Notification that the page tables have been modified.
1600 */
1601
1602typedef
1603#include "vmware_pack_begin.h"
1604struct SVGA3dCmdUpdateGBMobMapping {
1605 SVGAMobId mobid;
1606}
1607#include "vmware_pack_end.h"
1608SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
1609
1610/*
1611 * Define a guest-backed surface.
1612 */
1613
1614typedef
1615#include "vmware_pack_begin.h"
1616struct SVGA3dCmdDefineGBSurface {
1617 uint32 sid;
1618 SVGA3dSurfaceFlags surfaceFlags;
1619 SVGA3dSurfaceFormat format;
1620 uint32 numMipLevels;
1621 uint32 multisampleCount;
1622 SVGA3dTextureFilter autogenFilter;
1623 SVGA3dSize size;
1624}
1625#include "vmware_pack_end.h"
1626SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
1627
1628/*
1629 * Destroy a guest-backed surface.
1630 */
1631
1632typedef
1633#include "vmware_pack_begin.h"
1634struct SVGA3dCmdDestroyGBSurface {
1635 uint32 sid;
1636}
1637#include "vmware_pack_end.h"
1638SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
1639
1640/*
1641 * Bind a guest-backed surface to a mob.
1642 */
1643
1644typedef
1645#include "vmware_pack_begin.h"
1646struct SVGA3dCmdBindGBSurface {
1647 uint32 sid;
1648 SVGAMobId mobid;
1649}
1650#include "vmware_pack_end.h"
1651SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
1652
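/*
 * Sketch: the define/bind split for guest-backed surfaces. A surface is
 * first defined (metadata only) and then backed by a mob. submit() is an
 * assumed helper that encodes one command with the given id and body.
 * Illustrative only.
 */
extern void submit(uint32 cmdId, const void *body, uint32 len);

static void
sketch_create_gb_surface(const SVGA3dCmdDefineGBSurface *desc,
                         SVGAMobId mobid)
{
   SVGA3dCmdBindGBSurface bind;

   submit(SVGA_3D_CMD_DEFINE_GB_SURFACE, desc, sizeof *desc);

   bind.sid = desc->sid;
   bind.mobid = mobid;
   submit(SVGA_3D_CMD_BIND_GB_SURFACE, &bind, sizeof bind);
}
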
1653typedef
1654#include "vmware_pack_begin.h"
1655struct SVGA3dCmdBindGBSurfaceWithPitch {
1656 uint32 sid;
1657 SVGAMobId mobid;
1658 uint32 baseLevelPitch;
1659}
1660#include "vmware_pack_end.h"
1661SVGA3dCmdBindGBSurfaceWithPitch; /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */
1662
1663/*
1664 * Conditionally bind a mob to a guest-backed surface if testMobid
1665 * matches the currently bound mob. Optionally issue a
1666 * readback/update on the surface while it is still bound to the old
1667 * mobid if the mobid is changed by this command.
1668 */
1669
1670#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
1671#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_UPDATE (1 << 1)
1672
1673typedef
1674#include "vmware_pack_begin.h"
1675struct {
1676 uint32 sid;
1677 SVGAMobId testMobid;
1678 SVGAMobId mobid;
1679 uint32 flags;
1680}
1681#include "vmware_pack_end.h"
1682SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
1683
1684/*
1685 * Update an image in a guest-backed surface.
1686 * (Inform the device that the guest-contents have been updated.)
1687 */
1688
1689typedef
1690#include "vmware_pack_begin.h"
1691struct SVGA3dCmdUpdateGBImage {
1692 SVGA3dSurfaceImageId image;
1693 SVGA3dBox box;
1694}
1695#include "vmware_pack_end.h"
1696SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
1697
1698/*
1699 * Update an entire guest-backed surface.
1700 * (Inform the device that the guest-contents have been updated.)
1701 */
1702
1703typedef
1704#include "vmware_pack_begin.h"
1705struct SVGA3dCmdUpdateGBSurface {
1706 uint32 sid;
1707}
1708#include "vmware_pack_end.h"
1709SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
1710
1711/*
1712 * Readback an image in a guest-backed surface.
1713 * (Request the device to flush the dirty contents into the guest.)
1714 */
1715
1716typedef
1717#include "vmware_pack_begin.h"
1718struct SVGA3dCmdReadbackGBImage {
1719 SVGA3dSurfaceImageId image;
1720}
1721#include "vmware_pack_end.h"
1722SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE */
1723
1724/*
1725 * Readback an entire guest-backed surface.
1726 * (Request the device to flush the dirty contents into the guest.)
1727 */
1728
1729typedef
1730#include "vmware_pack_begin.h"
1731struct SVGA3dCmdReadbackGBSurface {
1732 uint32 sid;
1733}
1734#include "vmware_pack_end.h"
1735SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
1736
1737/*
1738 * Readback a sub rect of an image in a guest-backed surface. After
1739 * issuing this command, the driver is required to issue an update call
1740 * of the same region before issuing any other commands that reference
1741 * this surface; otherwise rendering is not guaranteed.
1742 */
1743
1744typedef
1745#include "vmware_pack_begin.h"
1746struct SVGA3dCmdReadbackGBImagePartial {
1747 SVGA3dSurfaceImageId image;
1748 SVGA3dBox box;
1749 uint32 invertBox;
1750}
1751#include "vmware_pack_end.h"
1752SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
1753
1754
1755/*
1756 * Invalidate an image in a guest-backed surface.
1757 * (Notify the device that the contents can be lost.)
1758 */
1759
1760typedef
1761#include "vmware_pack_begin.h"
1762struct SVGA3dCmdInvalidateGBImage {
1763 SVGA3dSurfaceImageId image;
1764}
1765#include "vmware_pack_end.h"
1766SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
1767
1768/*
1769 * Invalidate an entire guest-backed surface.
1770 * (Notify the device that the contents of all images can be lost.)
1771 */
1772
1773typedef
1774#include "vmware_pack_begin.h"
1775struct SVGA3dCmdInvalidateGBSurface {
1776 uint32 sid;
1777}
1778#include "vmware_pack_end.h"
1779SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
1780
1781/*
1782 * Invalidate a sub rect of an image in a guest-backed surface. After
1783 * issuing this command, the driver is required to issue an update call
1784 * of the same region before issuing any other commands that reference
1785 * this surface; otherwise rendering is not guaranteed.
1786 */
1787
1788typedef
1789#include "vmware_pack_begin.h"
1790struct SVGA3dCmdInvalidateGBImagePartial {
1791 SVGA3dSurfaceImageId image;
1792 SVGA3dBox box;
1793 uint32 invertBox;
1794}
1795#include "vmware_pack_end.h"
1796SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
1797
1798
1799/*
1800 * Define a guest-backed context.
1801 */
1802
1803typedef
1804#include "vmware_pack_begin.h"
1805struct SVGA3dCmdDefineGBContext {
1806 uint32 cid;
1807}
1808#include "vmware_pack_end.h"
1809SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
1810
1811/*
1812 * Destroy a guest-backed context.
1813 */
1814
1815typedef
1816#include "vmware_pack_begin.h"
1817struct SVGA3dCmdDestroyGBContext {
1818 uint32 cid;
1819}
1820#include "vmware_pack_end.h"
1821SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
1822
1823/*
1824 * Bind a guest-backed context.
1825 *
1826 * validContents should be set to 0 for new contexts,
1827 * and 1 if this is an old context which is getting paged
1828 * back onto the device.
1829 *
1830 * For new contexts, it is recommended that the driver
1831 * issue commands to initialize all interesting state
1832 * prior to rendering.
1833 */
1834
1835typedef
1836#include "vmware_pack_begin.h"
1837struct SVGA3dCmdBindGBContext {
1838 uint32 cid;
1839 SVGAMobId mobid;
1840 uint32 validContents;
1841}
1842#include "vmware_pack_end.h"
1843SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
1844
1845/*
1846 * Readback a guest-backed context.
1847 * (Request that the device flush the contents back into guest memory.)
1848 */
1849
1850typedef
1851#include "vmware_pack_begin.h"
1852struct SVGA3dCmdReadbackGBContext {
1853 uint32 cid;
1854}
1855#include "vmware_pack_end.h"
1856SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
1857
1858/*
1859 * Invalidate a guest-backed context.
1860 */
1861typedef
1862#include "vmware_pack_begin.h"
1863struct SVGA3dCmdInvalidateGBContext {
1864 uint32 cid;
1865}
1866#include "vmware_pack_end.h"
1867SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
1868
1869/*
1870 * Define a guest-backed shader.
1871 */
1872
1873typedef
1874#include "vmware_pack_begin.h"
1875struct SVGA3dCmdDefineGBShader {
1876 uint32 shid;
1877 SVGA3dShaderType type;
1878 uint32 sizeInBytes;
1879}
1880#include "vmware_pack_end.h"
1881SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
1882
1883/*
1884 * Bind a guest-backed shader.
1885 */
1886
1887typedef
1888#include "vmware_pack_begin.h"
1889struct SVGA3dCmdBindGBShader {
1890 uint32 shid;
1891 SVGAMobId mobid;
1892 uint32 offsetInBytes;
1893}
1894#include "vmware_pack_end.h"
1895SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
1896
1897/*
1898 * Destroy a guest-backed shader.
1899 */
1900
1901typedef
1902#include "vmware_pack_begin.h"
1903struct SVGA3dCmdDestroyGBShader {
1904 uint32 shid;
1905}
1906#include "vmware_pack_end.h"
1907SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
1908
1909typedef
1910#include "vmware_pack_begin.h"
1911struct {
1912 uint32 cid;
1913 uint32 regStart;
1914 SVGA3dShaderType shaderType;
1915 SVGA3dShaderConstType constType;
1916
1917 /*
1918 * Followed by a variable number of shader constants.
1919 *
1920 * Note that FLOAT and INT constants are 4-dwords in length, while
1921 * BOOL constants are 1-dword in length.
1922 */
1923}
1924#include "vmware_pack_end.h"
1925SVGA3dCmdSetGBShaderConstInline; /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
1926
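/*
 * Sketch of the sizing note above: FLOAT and INT constants occupy four
 * dwords each, BOOL constants one dword. Illustrative only.
 */
static uint32
sketch_shader_const_payload_bytes(SVGA3dShaderConstType ctype, uint32 count)
{
   const uint32 dwordsPerConst =
      (ctype == SVGA3D_CONST_TYPE_BOOL) ? 1 : 4;

   return count * dwordsPerConst * sizeof(uint32);
}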
1927
1928typedef
1929#include "vmware_pack_begin.h"
1930struct {
1931 uint32 cid;
1932 SVGA3dQueryType type;
1933}
1934#include "vmware_pack_end.h"
1935SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
1936
1937typedef
1938#include "vmware_pack_begin.h"
1939struct {
1940 uint32 cid;
1941 SVGA3dQueryType type;
1942 SVGAMobId mobid;
1943 uint32 offset;
1944}
1945#include "vmware_pack_end.h"
1946SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
1947
1948
1949/*
1950 * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
1951 *
1952 * The semantics of this command are identical to the
1953 * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
1954 * to a Mob instead of a GMR.
1955 */
1956
1957typedef
1958#include "vmware_pack_begin.h"
1959struct {
1960 uint32 cid;
1961 SVGA3dQueryType type;
1962 SVGAMobId mobid;
1963 uint32 offset;
1964}
1965#include "vmware_pack_end.h"
1966SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
1967
1968
1969typedef
1970#include "vmware_pack_begin.h"
1971struct {
1972 SVGAMobId mobid;
1973 uint32 mustBeZero;
1974 uint32 initialized;
1975}
1976#include "vmware_pack_end.h"
1977SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
1978
1979typedef
1980#include "vmware_pack_begin.h"
1981struct {
1982 SVGAMobId mobid;
1983 uint32 gartOffset;
1984}
1985#include "vmware_pack_end.h"
1986SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
1987
1988
1989typedef
1990#include "vmware_pack_begin.h"
1991struct {
1992 uint32 gartOffset;
1993 uint32 numPages;
1994}
1995#include "vmware_pack_end.h"
1996SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
1997
1998
1999/*
2000 * Screen Targets
2001 */
2002
2003typedef
2004#include "vmware_pack_begin.h"
2005struct {
2006 uint32 stid;
2007 uint32 width;
2008 uint32 height;
2009 int32 xRoot;
2010 int32 yRoot;
2011 SVGAScreenTargetFlags flags;
2012
2013 /*
2014 * The physical DPI at which the guest expects this screen to be displayed.
2015 *
2016 * Guests which are not DPI-aware should set this to zero.
2017 */
2018 uint32 dpi;
2019}
2020#include "vmware_pack_end.h"
2021SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
2022
2023typedef
2024#include "vmware_pack_begin.h"
2025struct {
2026 uint32 stid;
2027}
2028#include "vmware_pack_end.h"
2029SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
2030
2031typedef
2032#include "vmware_pack_begin.h"
2033struct {
2034 uint32 stid;
2035 SVGA3dSurfaceImageId image;
2036}
2037#include "vmware_pack_end.h"
2038SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
2039
2040typedef
2041#include "vmware_pack_begin.h"
2042struct {
2043 uint32 stid;
2044 SVGA3dRect rect;
2045}
2046#include "vmware_pack_end.h"
2047SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
2048
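/*
 * Sketch: bringing up a screen target: define it, bind a surface image,
 * then flush an initial full-rect update. submit() is the same assumed
 * helper as in the earlier sketches. Illustrative only.
 */
extern void submit(uint32 cmdId, const void *body, uint32 len);

static void
sketch_enable_screen_target(uint32 stid, uint32 width, uint32 height,
                            SVGA3dSurfaceImageId image)
{
   SVGA3dCmdDefineGBScreenTarget def;
   SVGA3dCmdBindGBScreenTarget bind;
   SVGA3dCmdUpdateGBScreenTarget upd;

   def.stid = stid;
   def.width = width;
   def.height = height;
   def.xRoot = 0;
   def.yRoot = 0;
   def.flags = SVGA_STFLAG_PRIMARY;
   def.dpi = 0;                          /* not DPI-aware */
   submit(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &def, sizeof def);

   bind.stid = stid;
   bind.image = image;
   submit(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &bind, sizeof bind);

   upd.stid = stid;
   upd.rect.x = 0;
   upd.rect.y = 0;
   upd.rect.w = width;
   upd.rect.h = height;
   submit(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &upd, sizeof upd);
}
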
2049typedef
2050#include "vmware_pack_begin.h"
2051struct SVGA3dCmdGBScreenDMA {
2052 uint32 screenId;
2053 uint32 dead;
2054 SVGAMobId destMobID;
2055 uint32 destPitch;
2056 SVGAMobId changeMapMobID;
2057}
2058#include "vmware_pack_end.h"
2059SVGA3dCmdGBScreenDMA; /* SVGA_3D_CMD_GB_SCREEN_DMA */
2060
2061typedef
2062#include "vmware_pack_begin.h"
2063struct {
2064 uint32 value;
2065 uint32 mobId;
2066 uint32 mobOffset;
2067}
2068#include "vmware_pack_end.h"
2069SVGA3dCmdGBMobFence; /* SVGA_3D_CMD_GB_MOB_FENCE*/
2070
2071#endif /* _SVGA3D_CMD_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
new file mode 100644
index 000000000000..c18b663f360f
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -0,0 +1,457 @@
1/**********************************************************
2 * Copyright 1998-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_devcaps.h --
28 *
29 * SVGA 3d caps definitions
30 */
31
32#ifndef _SVGA3D_DEVCAPS_H_
33#define _SVGA3D_DEVCAPS_H_
34
35#define INCLUDE_ALLOW_MODULE
36#define INCLUDE_ALLOW_USERLEVEL
37#define INCLUDE_ALLOW_VMCORE
38
39#include "includeCheck.h"
40
41/*
42 * 3D Hardware Version
43 *
44 * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
45 * register. It is set by the host and read by the guest. This lets
46 * us make new guest drivers which are backwards-compatible with old
47 * SVGA hardware revisions. It does not let us support old guest
48 * drivers. Good enough for now.
49 *
50 */
51
52#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
53#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
54#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
55
56typedef enum {
57 SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
58 SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
59 SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
60 SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
61 SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
62 SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
63 SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1),
64 SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1,
65} SVGA3dHardwareVersion;
66
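/*
 * Sketch: because versions are encoded as (major << 16) | minor, they
 * compare directly as integers. Illustrative only.
 */
static Bool
sketch_host_supports_ws8(uint32 hwversion)
{
   return hwversion >= SVGA3D_HWVERSION_WS8_B1;
}
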
67/*
68 * DevCap indexes.
69 */
70
71typedef enum {
72 SVGA3D_DEVCAP_INVALID = ((uint32)-1),
73 SVGA3D_DEVCAP_3D = 0,
74 SVGA3D_DEVCAP_MAX_LIGHTS = 1,
75
76 /*
77 * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
78 * fixed-function texture units available. Each of these units
79 * works in both FFP and shader modes, and they support texture
80 * transforms and texture coordinates. The host may have additional
81 * texture image units that are only usable with shaders.
82 */
83 SVGA3D_DEVCAP_MAX_TEXTURES = 2,
84 SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
85 SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
86 SVGA3D_DEVCAP_VERTEX_SHADER = 5,
87 SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
88 SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
89 SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
90 SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
91 SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
92 SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
93 SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12,
94 SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13,
95 SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14,
96 SVGA3D_DEVCAP_QUERY_TYPES = 15,
97 SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
98 SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
99 SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
100 SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
101 SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
102 SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
103 SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
104 SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
105 SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
106 SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
107 SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
108 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
109 SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
110 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
111 SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
112 SVGA3D_DEVCAP_TEXTURE_OPS = 31,
113 SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
114 SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
115 SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
116 SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
117 SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
118 SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
119 SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
120 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
121 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
122 SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
123 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
124 SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
125 SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
126 SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
127 SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
128 SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
129 SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
130 SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
131 SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
132 SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
133 SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
134 SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
135 SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
136 SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
137 SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
138 SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
139 SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
140 SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
141 SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
142 SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
143
144 /*
145 * There is a hole in our devcap definitions for
146 * historical reasons.
147 *
148 * Define a constant just for completeness.
149 */
150 SVGA3D_DEVCAP_MISSING62 = 62,
151
152 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
153
154 /*
155 * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
156 * render targets. This does not include the depth or stencil targets.
157 */
158 SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
159
160 SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
161 SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
162 SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
163 SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
164 SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
165 SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
166 SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
167 SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
168 SVGA3D_DEVCAP_SUPERSAMPLE = 73,
169 SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
170 SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
171 SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
172
173 /*
174 * This is the maximum number of SVGA context IDs that the guest
175 * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
176 */
177 SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
178
179 /*
180 * This is the maximum number of SVGA surface IDs that the guest
181 * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
182 */
183 SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
184
185 SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
186 SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
187 SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
188
189 SVGA3D_DEVCAP_SURFACEFMT_ATI1 = 82,
190 SVGA3D_DEVCAP_SURFACEFMT_ATI2 = 83,
191
192 /*
193 * Deprecated.
194 */
195 SVGA3D_DEVCAP_DEAD1 = 84,
196
197 /*
198 * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
199 * ORed together, one for every type of video decoding supported.
200 */
201 SVGA3D_DEVCAP_VIDEO_DECODE = 85,
202
203 /*
204 * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
205 * ORed together, one for every type of video processing supported.
206 */
207 SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
208
209 SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */
210 SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */
211 SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */
212 SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */
213
214 SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
215
216 /*
217 * Does the host support the SVGA logic ops commands?
218 */
219 SVGA3D_DEVCAP_LOGICOPS = 92,
220
221 /*
222 * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
223 */
224 SVGA3D_DEVCAP_TS_COLOR_KEY = 93, /* boolean */
225
226 /*
227 * Deprecated.
228 */
229 SVGA3D_DEVCAP_DEAD2 = 94,
230
231 /*
232 * Does the device support the DX commands?
233 */
234 SVGA3D_DEVCAP_DX = 95,
235
236 /*
237 * What is the maximum size of a texture array?
238 *
239 * (Even if this cap is zero, cubemaps are still allowed.)
240 */
241 SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE = 96,
242
243 /*
244 * What is the maximum number of vertex buffers that can
245 * be used in the DXContext inputAssembly?
246 */
247 SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS = 97,
248
249 /*
250 * What is the maximum number of constant buffers
251 * that can be expected to work correctly with a
252 * DX context?
253 */
254 SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS = 98,
255
256 /*
257 * Does the device support provoking vertex control?
258 * If zero, the first vertex will always be the provoking vertex.
259 */
260 SVGA3D_DEVCAP_DX_PROVOKING_VERTEX = 99,
261
262 SVGA3D_DEVCAP_DXFMT_X8R8G8B8 = 100,
263 SVGA3D_DEVCAP_DXFMT_A8R8G8B8 = 101,
264 SVGA3D_DEVCAP_DXFMT_R5G6B5 = 102,
265 SVGA3D_DEVCAP_DXFMT_X1R5G5B5 = 103,
266 SVGA3D_DEVCAP_DXFMT_A1R5G5B5 = 104,
267 SVGA3D_DEVCAP_DXFMT_A4R4G4B4 = 105,
268 SVGA3D_DEVCAP_DXFMT_Z_D32 = 106,
269 SVGA3D_DEVCAP_DXFMT_Z_D16 = 107,
270 SVGA3D_DEVCAP_DXFMT_Z_D24S8 = 108,
271 SVGA3D_DEVCAP_DXFMT_Z_D15S1 = 109,
272 SVGA3D_DEVCAP_DXFMT_LUMINANCE8 = 110,
273 SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4 = 111,
274 SVGA3D_DEVCAP_DXFMT_LUMINANCE16 = 112,
275 SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8 = 113,
276 SVGA3D_DEVCAP_DXFMT_DXT1 = 114,
277 SVGA3D_DEVCAP_DXFMT_DXT2 = 115,
278 SVGA3D_DEVCAP_DXFMT_DXT3 = 116,
279 SVGA3D_DEVCAP_DXFMT_DXT4 = 117,
280 SVGA3D_DEVCAP_DXFMT_DXT5 = 118,
281 SVGA3D_DEVCAP_DXFMT_BUMPU8V8 = 119,
282 SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 = 120,
283 SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 = 121,
284 SVGA3D_DEVCAP_DXFMT_BUMPL8V8U8 = 122,
285 SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 = 123,
286 SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 = 124,
287 SVGA3D_DEVCAP_DXFMT_A2R10G10B10 = 125,
288 SVGA3D_DEVCAP_DXFMT_V8U8 = 126,
289 SVGA3D_DEVCAP_DXFMT_Q8W8V8U8 = 127,
290 SVGA3D_DEVCAP_DXFMT_CxV8U8 = 128,
291 SVGA3D_DEVCAP_DXFMT_X8L8V8U8 = 129,
292 SVGA3D_DEVCAP_DXFMT_A2W10V10U10 = 130,
293 SVGA3D_DEVCAP_DXFMT_ALPHA8 = 131,
294 SVGA3D_DEVCAP_DXFMT_R_S10E5 = 132,
295 SVGA3D_DEVCAP_DXFMT_R_S23E8 = 133,
296 SVGA3D_DEVCAP_DXFMT_RG_S10E5 = 134,
297 SVGA3D_DEVCAP_DXFMT_RG_S23E8 = 135,
298 SVGA3D_DEVCAP_DXFMT_BUFFER = 136,
299 SVGA3D_DEVCAP_DXFMT_Z_D24X8 = 137,
300 SVGA3D_DEVCAP_DXFMT_V16U16 = 138,
301 SVGA3D_DEVCAP_DXFMT_G16R16 = 139,
302 SVGA3D_DEVCAP_DXFMT_A16B16G16R16 = 140,
303 SVGA3D_DEVCAP_DXFMT_UYVY = 141,
304 SVGA3D_DEVCAP_DXFMT_YUY2 = 142,
305 SVGA3D_DEVCAP_DXFMT_NV12 = 143,
306 SVGA3D_DEVCAP_DXFMT_AYUV = 144,
307 SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS = 145,
308 SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT = 146,
309 SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT = 147,
310 SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS = 148,
311 SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT = 149,
312 SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT = 150,
313 SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT = 151,
314 SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS = 152,
315 SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT = 153,
316 SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM = 154,
317 SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT = 155,
318 SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS = 156,
319 SVGA3D_DEVCAP_DXFMT_R32G32_UINT = 157,
320 SVGA3D_DEVCAP_DXFMT_R32G32_SINT = 158,
321 SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS = 159,
322 SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT = 160,
323 SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24_TYPELESS = 161,
324 SVGA3D_DEVCAP_DXFMT_X32_TYPELESS_G8X24_UINT = 162,
325 SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS = 163,
326 SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT = 164,
327 SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT = 165,
328 SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS = 166,
329 SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM = 167,
330 SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB = 168,
331 SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT = 169,
332 SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT = 170,
333 SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS = 171,
334 SVGA3D_DEVCAP_DXFMT_R16G16_UINT = 172,
335 SVGA3D_DEVCAP_DXFMT_R16G16_SINT = 173,
336 SVGA3D_DEVCAP_DXFMT_R32_TYPELESS = 174,
337 SVGA3D_DEVCAP_DXFMT_D32_FLOAT = 175,
338 SVGA3D_DEVCAP_DXFMT_R32_UINT = 176,
339 SVGA3D_DEVCAP_DXFMT_R32_SINT = 177,
340 SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS = 178,
341 SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT = 179,
342 SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8_TYPELESS = 180,
343 SVGA3D_DEVCAP_DXFMT_X24_TYPELESS_G8_UINT = 181,
344 SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS = 182,
345 SVGA3D_DEVCAP_DXFMT_R8G8_UNORM = 183,
346 SVGA3D_DEVCAP_DXFMT_R8G8_UINT = 184,
347 SVGA3D_DEVCAP_DXFMT_R8G8_SINT = 185,
348 SVGA3D_DEVCAP_DXFMT_R16_TYPELESS = 186,
349 SVGA3D_DEVCAP_DXFMT_R16_UNORM = 187,
350 SVGA3D_DEVCAP_DXFMT_R16_UINT = 188,
351 SVGA3D_DEVCAP_DXFMT_R16_SNORM = 189,
352 SVGA3D_DEVCAP_DXFMT_R16_SINT = 190,
353 SVGA3D_DEVCAP_DXFMT_R8_TYPELESS = 191,
354 SVGA3D_DEVCAP_DXFMT_R8_UNORM = 192,
355 SVGA3D_DEVCAP_DXFMT_R8_UINT = 193,
356 SVGA3D_DEVCAP_DXFMT_R8_SNORM = 194,
357 SVGA3D_DEVCAP_DXFMT_R8_SINT = 195,
358 SVGA3D_DEVCAP_DXFMT_P8 = 196,
359 SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP = 197,
360 SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM = 198,
361 SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM = 199,
362 SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS = 200,
363 SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB = 201,
364 SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS = 202,
365 SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB = 203,
366 SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS = 204,
367 SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB = 205,
368 SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS = 206,
369 SVGA3D_DEVCAP_DXFMT_ATI1 = 207,
370 SVGA3D_DEVCAP_DXFMT_BC4_SNORM = 208,
371 SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS = 209,
372 SVGA3D_DEVCAP_DXFMT_ATI2 = 210,
373 SVGA3D_DEVCAP_DXFMT_BC5_SNORM = 211,
374 SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM = 212,
375 SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS = 213,
376 SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB = 214,
377 SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS = 215,
378 SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB = 216,
379 SVGA3D_DEVCAP_DXFMT_Z_DF16 = 217,
380 SVGA3D_DEVCAP_DXFMT_Z_DF24 = 218,
381 SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT = 219,
382 SVGA3D_DEVCAP_DXFMT_YV12 = 220,
383 SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT = 221,
384 SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT = 222,
385 SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM = 223,
386 SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT = 224,
387 SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM = 225,
388 SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM = 226,
389 SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT = 227,
390 SVGA3D_DEVCAP_DXFMT_R16G16_UNORM = 228,
391 SVGA3D_DEVCAP_DXFMT_R16G16_SNORM = 229,
392 SVGA3D_DEVCAP_DXFMT_R32_FLOAT = 230,
393 SVGA3D_DEVCAP_DXFMT_R8G8_SNORM = 231,
394 SVGA3D_DEVCAP_DXFMT_R16_FLOAT = 232,
395 SVGA3D_DEVCAP_DXFMT_D16_UNORM = 233,
396 SVGA3D_DEVCAP_DXFMT_A8_UNORM = 234,
397 SVGA3D_DEVCAP_DXFMT_BC1_UNORM = 235,
398 SVGA3D_DEVCAP_DXFMT_BC2_UNORM = 236,
399 SVGA3D_DEVCAP_DXFMT_BC3_UNORM = 237,
400 SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM = 238,
401 SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM = 239,
402 SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM = 240,
403 SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM = 241,
404 SVGA3D_DEVCAP_DXFMT_BC4_UNORM = 242,
405 SVGA3D_DEVCAP_DXFMT_BC5_UNORM = 243,
406
407 SVGA3D_DEVCAP_MAX /* This must be the last index. */
408} SVGA3dDevCapIndex;
409
410/*
411 * Bit definitions for DXFMT devcaps
412 *
413 * SUPPORTED: Can the format be defined?
414 * SHADER_SAMPLE: Can the format be sampled from a shader?
415 * COLOR_RENDERTARGET: Can the format be a color render target?
416 * DEPTH_RENDERTARGET: Can the format be a depth render target?
417 * BLENDABLE: Is the format blendable?
418 * MIPS: Does the format support mip levels?
419 * ARRAY: Does the format support texture arrays?
420 * VOLUME: Does the format support having volume?
421 * DX_VERTEX_BUFFER: Can the format be used as a DX vertex buffer?
422 * MULTISAMPLE_2: Does the format support 2x multisample?
423 * MULTISAMPLE_4: Does the format support 4x multisample?
424 * MULTISAMPLE_8: Does the format support 8x multisample?
425 */
426#define SVGA3D_DXFMT_SUPPORTED (1 << 0)
427#define SVGA3D_DXFMT_SHADER_SAMPLE (1 << 1)
428#define SVGA3D_DXFMT_COLOR_RENDERTARGET (1 << 2)
429#define SVGA3D_DXFMT_DEPTH_RENDERTARGET (1 << 3)
430#define SVGA3D_DXFMT_BLENDABLE (1 << 4)
431#define SVGA3D_DXFMT_MIPS (1 << 5)
432#define SVGA3D_DXFMT_ARRAY (1 << 6)
433#define SVGA3D_DXFMT_VOLUME (1 << 7)
434#define SVGA3D_DXFMT_DX_VERTEX_BUFFER (1 << 8)
435#define SVGADX_DXFMT_MULTISAMPLE_2 (1 << 9)
436#define SVGADX_DXFMT_MULTISAMPLE_4 (1 << 10)
437#define SVGADX_DXFMT_MULTISAMPLE_8 (1 << 11)
438#define SVGADX_DXFMT_MAX (1 << 12)
439
440/*
441 * Convenience mask for any multisample capability.
442 *
443 * The multisample bits imply both load and render capability.
444 */
445#define SVGA3D_DXFMT_MULTISAMPLE ( \
446 SVGADX_DXFMT_MULTISAMPLE_2 | \
447 SVGADX_DXFMT_MULTISAMPLE_4 | \
448 SVGADX_DXFMT_MULTISAMPLE_8 )
449
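As an illustration of how these bits are meant to be consumed, here is a minimal sketch of testing a format's DXFMT devcap mask; the caps value is assumed to come from a prior devcap query, and the helper name is ours, not the device's:

static Bool format_is_blendable_rt(uint32 dxfmt_caps)
{
	/* All three capabilities must be present for a blendable color RT. */
	const uint32 required = SVGA3D_DXFMT_SUPPORTED |
				SVGA3D_DXFMT_COLOR_RENDERTARGET |
				SVGA3D_DXFMT_BLENDABLE;

	return (dxfmt_caps & required) == required;
}
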
450typedef union {
451 Bool b;
452 uint32 u;
453 int32 i;
454 float f;
455} SVGA3dDevCapResult;
456
457#endif /* _SVGA3D_DEVCAPS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
new file mode 100644
index 000000000000..8c5ae608cfb4
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -0,0 +1,1487 @@
1/**********************************************************
2 * Copyright 2012-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_dx.h --
28 *
29 * SVGA 3d hardware definitions for DX10 support.
30 */
31
32#ifndef _SVGA3D_DX_H_
33#define _SVGA3D_DX_H_
34
35#define INCLUDE_ALLOW_MODULE
36#define INCLUDE_ALLOW_USERLEVEL
37#define INCLUDE_ALLOW_VMCORE
38#include "includeCheck.h"
39
40#include "svga3d_limits.h"
41
42#define SVGA3D_INPUT_MIN 0
43#define SVGA3D_INPUT_PER_VERTEX_DATA 0
44#define SVGA3D_INPUT_PER_INSTANCE_DATA 1
45#define SVGA3D_INPUT_MAX 2
46typedef uint32 SVGA3dInputClassification;
47
48#define SVGA3D_RESOURCE_TYPE_MIN 1
49#define SVGA3D_RESOURCE_BUFFER 1
50#define SVGA3D_RESOURCE_TEXTURE1D 2
51#define SVGA3D_RESOURCE_TEXTURE2D 3
52#define SVGA3D_RESOURCE_TEXTURE3D 4
53#define SVGA3D_RESOURCE_TEXTURECUBE 5
54#define SVGA3D_RESOURCE_TYPE_DX10_MAX 6
55#define SVGA3D_RESOURCE_BUFFEREX 6
56#define SVGA3D_RESOURCE_TYPE_MAX 7
57typedef uint32 SVGA3dResourceType;
58
59#define SVGA3D_DEPTH_WRITE_MASK_ZERO 0
60#define SVGA3D_DEPTH_WRITE_MASK_ALL 1
61typedef uint8 SVGA3dDepthWriteMask;
62
63#define SVGA3D_FILTER_MIP_LINEAR (1 << 0)
64#define SVGA3D_FILTER_MAG_LINEAR (1 << 2)
65#define SVGA3D_FILTER_MIN_LINEAR (1 << 4)
66#define SVGA3D_FILTER_ANISOTROPIC (1 << 6)
67#define SVGA3D_FILTER_COMPARE (1 << 7)
68typedef uint32 SVGA3dFilter;
69
70#define SVGA3D_CULL_INVALID 0
71#define SVGA3D_CULL_MIN 1
72#define SVGA3D_CULL_NONE 1
73#define SVGA3D_CULL_FRONT 2
74#define SVGA3D_CULL_BACK 3
75#define SVGA3D_CULL_MAX 4
76typedef uint8 SVGA3dCullMode;
77
78#define SVGA3D_COMPARISON_INVALID 0
79#define SVGA3D_COMPARISON_MIN 1
80#define SVGA3D_COMPARISON_NEVER 1
81#define SVGA3D_COMPARISON_LESS 2
82#define SVGA3D_COMPARISON_EQUAL 3
83#define SVGA3D_COMPARISON_LESS_EQUAL 4
84#define SVGA3D_COMPARISON_GREATER 5
85#define SVGA3D_COMPARISON_NOT_EQUAL 6
86#define SVGA3D_COMPARISON_GREATER_EQUAL 7
87#define SVGA3D_COMPARISON_ALWAYS 8
88#define SVGA3D_COMPARISON_MAX 9
89typedef uint8 SVGA3dComparisonFunc;
90
91#define SVGA3D_DX_MAX_VERTEXBUFFERS 32
92#define SVGA3D_DX_MAX_SOTARGETS 4
93#define SVGA3D_DX_MAX_SRVIEWS 128
94#define SVGA3D_DX_MAX_CONSTBUFFERS 16
95#define SVGA3D_DX_MAX_SAMPLERS 16
96
97/* Id limits */
98static const uint32 SVGA3dBlendObjectCountPerContext = 4096;
99static const uint32 SVGA3dDepthStencilObjectCountPerContext = 4096;
100
101typedef uint32 SVGA3dSurfaceId;
102typedef uint32 SVGA3dShaderResourceViewId;
103typedef uint32 SVGA3dRenderTargetViewId;
104typedef uint32 SVGA3dDepthStencilViewId;
105
106typedef uint32 SVGA3dShaderId;
107typedef uint32 SVGA3dElementLayoutId;
108typedef uint32 SVGA3dSamplerId;
109typedef uint32 SVGA3dBlendStateId;
110typedef uint32 SVGA3dDepthStencilStateId;
111typedef uint32 SVGA3dRasterizerStateId;
112typedef uint32 SVGA3dQueryId;
113typedef uint32 SVGA3dStreamOutputId;
114
115typedef union {
116 struct {
117 float r;
118 float g;
119 float b;
120 float a;
121 };
122
123 float value[4];
124} SVGA3dRGBAFloat;
125
126typedef
127#include "vmware_pack_begin.h"
128struct {
129 uint32 cid;
130 SVGAMobId mobid;
131}
132#include "vmware_pack_end.h"
133SVGAOTableDXContextEntry;
134
135typedef
136#include "vmware_pack_begin.h"
137struct SVGA3dCmdDXDefineContext {
138 uint32 cid;
139}
140#include "vmware_pack_end.h"
141SVGA3dCmdDXDefineContext; /* SVGA_3D_CMD_DX_DEFINE_CONTEXT */
142
143typedef
144#include "vmware_pack_begin.h"
145struct SVGA3dCmdDXDestroyContext {
146 uint32 cid;
147}
148#include "vmware_pack_end.h"
149SVGA3dCmdDXDestroyContext; /* SVGA_3D_CMD_DX_DESTROY_CONTEXT */
150
151/*
152 * Bind a DX context.
153 *
154 * validContents should be set to 0 for new contexts,
155 * and 1 if this is an old context that is being paged
156 * back onto the device.
157 *
158 * For new contexts, it is recommended that the driver
159 * issue commands to initialize all interesting state
160 * prior to rendering.
161 */
162typedef
163#include "vmware_pack_begin.h"
164struct SVGA3dCmdDXBindContext {
165 uint32 cid;
166 SVGAMobId mobid;
167 uint32 validContents;
168}
169#include "vmware_pack_end.h"
170SVGA3dCmdDXBindContext; /* SVGA_3D_CMD_DX_BIND_CONTEXT */
171
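A minimal sketch of filling this command per the validContents rule above; the helper and its parameters are placeholders, not names from this header:

static void init_bind_context(SVGA3dCmdDXBindContext *cmd, uint32 ctx_id,
			      SVGAMobId backing_mob, Bool paged_back)
{
	cmd->cid = ctx_id;
	cmd->mobid = backing_mob;
	/* 0 for a brand new context, 1 when paging an old one back in. */
	cmd->validContents = paged_back ? 1 : 0;
}
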
172/*
173 * Readback a DX context.
174 * (Request that the device flush the contents back into guest memory.)
175 */
176typedef
177#include "vmware_pack_begin.h"
178struct SVGA3dCmdDXReadbackContext {
179 uint32 cid;
180}
181#include "vmware_pack_end.h"
182SVGA3dCmdDXReadbackContext; /* SVGA_3D_CMD_DX_READBACK_CONTEXT */
183
184/*
185 * Invalidate a guest-backed context.
186 */
187typedef
188#include "vmware_pack_begin.h"
189struct SVGA3dCmdDXInvalidateContext {
190 uint32 cid;
191}
192#include "vmware_pack_end.h"
193SVGA3dCmdDXInvalidateContext; /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */
194
195typedef
196#include "vmware_pack_begin.h"
197struct SVGA3dReplyFormatData {
198 uint32 formatSupport;
199 uint32 msaa2xQualityLevels:5;
200 uint32 msaa4xQualityLevels:5;
201 uint32 msaa8xQualityLevels:5;
202 uint32 msaa16xQualityLevels:5;
203 uint32 msaa32xQualityLevels:5;
204 uint32 pad:7;
205}
206#include "vmware_pack_end.h"
207SVGA3dReplyFormatData;
208
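Each quality-level field above is a 5-bit count packed after the support mask; a minimal sketch of pulling one of them out of a reply the device has filled in:

static uint32 reply_msaa4x_quality_levels(const SVGA3dReplyFormatData *reply)
{
	/* 5-bit bitfield, so the reported count is in the range 0..31. */
	return reply->msaa4xQualityLevels;
}
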
209typedef
210#include "vmware_pack_begin.h"
211struct SVGA3dCmdDXSetSingleConstantBuffer {
212 uint32 slot;
213 SVGA3dShaderType type;
214 SVGA3dSurfaceId sid;
215 uint32 offsetInBytes;
216 uint32 sizeInBytes;
217}
218#include "vmware_pack_end.h"
219SVGA3dCmdDXSetSingleConstantBuffer;
220/* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER */
221
222typedef
223#include "vmware_pack_begin.h"
224struct SVGA3dCmdDXSetShaderResources {
225 uint32 startView;
226 SVGA3dShaderType type;
227
228 /*
229 * Followed by a variable number of SVGA3dShaderResourceViewId's.
230 */
231}
232#include "vmware_pack_end.h"
233SVGA3dCmdDXSetShaderResources; /* SVGA_3D_CMD_DX_SET_SHADER_RESOURCES */
234
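Commands such as this one are followed in the command buffer by a variable-length array, so the body size is computed from the element count. A sketch under that assumption (size_t is presumed to be in scope):

static size_t set_shader_resources_body_size(uint32 num_views)
{
	/* Fixed header plus one view id per bound resource. */
	return sizeof(SVGA3dCmdDXSetShaderResources) +
	       num_views * sizeof(SVGA3dShaderResourceViewId);
}
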
235typedef
236#include "vmware_pack_begin.h"
237struct SVGA3dCmdDXSetShader {
238 SVGA3dShaderId shaderId;
239 SVGA3dShaderType type;
240}
241#include "vmware_pack_end.h"
242SVGA3dCmdDXSetShader; /* SVGA_3D_CMD_DX_SET_SHADER */
243
244typedef
245#include "vmware_pack_begin.h"
246struct SVGA3dCmdDXSetSamplers {
247 uint32 startSampler;
248 SVGA3dShaderType type;
249
250 /*
251 * Followed by a variable number of SVGA3dSamplerId's.
252 */
253}
254#include "vmware_pack_end.h"
255SVGA3dCmdDXSetSamplers; /* SVGA_3D_CMD_DX_SET_SAMPLERS */
256
257typedef
258#include "vmware_pack_begin.h"
259struct SVGA3dCmdDXDraw {
260 uint32 vertexCount;
261 uint32 startVertexLocation;
262}
263#include "vmware_pack_end.h"
264SVGA3dCmdDXDraw; /* SVGA_3D_CMD_DX_DRAW */
265
266typedef
267#include "vmware_pack_begin.h"
268struct SVGA3dCmdDXDrawIndexed {
269 uint32 indexCount;
270 uint32 startIndexLocation;
271 int32 baseVertexLocation;
272}
273#include "vmware_pack_end.h"
274SVGA3dCmdDXDrawIndexed; /* SVGA_3D_CMD_DX_DRAW_INDEXED */
275
276typedef
277#include "vmware_pack_begin.h"
278struct SVGA3dCmdDXDrawInstanced {
279 uint32 vertexCountPerInstance;
280 uint32 instanceCount;
281 uint32 startVertexLocation;
282 uint32 startInstanceLocation;
283}
284#include "vmware_pack_end.h"
285SVGA3dCmdDXDrawInstanced; /* SVGA_3D_CMD_DX_DRAW_INSTANCED */
286
287typedef
288#include "vmware_pack_begin.h"
289struct SVGA3dCmdDXDrawIndexedInstanced {
290 uint32 indexCountPerInstance;
291 uint32 instanceCount;
292 uint32 startIndexLocation;
293 int32 baseVertexLocation;
294 uint32 startInstanceLocation;
295}
296#include "vmware_pack_end.h"
297SVGA3dCmdDXDrawIndexedInstanced; /* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED */
298
299typedef
300#include "vmware_pack_begin.h"
301struct SVGA3dCmdDXDrawAuto {
302 uint32 pad0;
303}
304#include "vmware_pack_end.h"
305SVGA3dCmdDXDrawAuto; /* SVGA_3D_CMD_DX_DRAW_AUTO */
306
307typedef
308#include "vmware_pack_begin.h"
309struct SVGA3dCmdDXSetInputLayout {
310 SVGA3dElementLayoutId elementLayoutId;
311}
312#include "vmware_pack_end.h"
313SVGA3dCmdDXSetInputLayout; /* SVGA_3D_CMD_DX_SET_INPUT_LAYOUT */
314
315typedef
316#include "vmware_pack_begin.h"
317struct SVGA3dVertexBuffer {
318 SVGA3dSurfaceId sid;
319 uint32 stride;
320 uint32 offset;
321}
322#include "vmware_pack_end.h"
323SVGA3dVertexBuffer;
324
325typedef
326#include "vmware_pack_begin.h"
327struct SVGA3dCmdDXSetVertexBuffers {
328 uint32 startBuffer;
329 /* Followed by a variable number of SVGA3dVertexBuffer's. */
330}
331#include "vmware_pack_end.h"
332SVGA3dCmdDXSetVertexBuffers; /* SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS */
333
334typedef
335#include "vmware_pack_begin.h"
336struct SVGA3dCmdDXSetIndexBuffer {
337 SVGA3dSurfaceId sid;
338 SVGA3dSurfaceFormat format;
339 uint32 offset;
340}
341#include "vmware_pack_end.h"
342SVGA3dCmdDXSetIndexBuffer; /* SVGA_3D_CMD_DX_SET_INDEX_BUFFER */
343
344typedef
345#include "vmware_pack_begin.h"
346struct SVGA3dCmdDXSetTopology {
347 SVGA3dPrimitiveType topology;
348}
349#include "vmware_pack_end.h"
350SVGA3dCmdDXSetTopology; /* SVGA_3D_CMD_DX_SET_TOPOLOGY */
351
352typedef
353#include "vmware_pack_begin.h"
354struct SVGA3dCmdDXSetRenderTargets {
355 SVGA3dDepthStencilViewId depthStencilViewId;
356 /* Followed by a variable number of SVGA3dRenderTargetViewId's. */
357}
358#include "vmware_pack_end.h"
359SVGA3dCmdDXSetRenderTargets; /* SVGA_3D_CMD_DX_SET_RENDERTARGETS */
360
361typedef
362#include "vmware_pack_begin.h"
363struct SVGA3dCmdDXSetBlendState {
364 SVGA3dBlendStateId blendId;
365 float blendFactor[4];
366 uint32 sampleMask;
367}
368#include "vmware_pack_end.h"
369SVGA3dCmdDXSetBlendState; /* SVGA_3D_CMD_DX_SET_BLEND_STATE */
370
371typedef
372#include "vmware_pack_begin.h"
373struct SVGA3dCmdDXSetDepthStencilState {
374 SVGA3dDepthStencilStateId depthStencilId;
375 uint32 stencilRef;
376}
377#include "vmware_pack_end.h"
378SVGA3dCmdDXSetDepthStencilState; /* SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE */
379
380typedef
381#include "vmware_pack_begin.h"
382struct SVGA3dCmdDXSetRasterizerState {
383 SVGA3dRasterizerStateId rasterizerId;
384}
385#include "vmware_pack_end.h"
386SVGA3dCmdDXSetRasterizerState; /* SVGA_3D_CMD_DX_SET_RASTERIZER_STATE */
387
388#define SVGA3D_DXQUERY_FLAG_PREDICATEHINT (1 << 0)
389typedef uint32 SVGA3dDXQueryFlags;
390
391/*
392 * SVGADXQueryDeviceState (and the associated SVGADXQueryDeviceBits) is used
393 * by the device to track query state transitions; it is not intended to be
394 * used by the driver.
395 */
396#define SVGADX_QDSTATE_INVALID ((uint8)-1) /* Query has no state */
397#define SVGADX_QDSTATE_MIN 0
398#define SVGADX_QDSTATE_IDLE 0 /* Query hasn't started yet */
399#define SVGADX_QDSTATE_ACTIVE 1 /* Query is actively gathering data */
400#define SVGADX_QDSTATE_PENDING 2 /* Query is waiting for results */
401#define SVGADX_QDSTATE_FINISHED 3 /* Query has completed */
402#define SVGADX_QDSTATE_MAX 4
403typedef uint8 SVGADXQueryDeviceState;
404
405typedef
406#include "vmware_pack_begin.h"
407struct {
408 SVGA3dQueryTypeUint8 type;
409 uint16 pad0;
410 SVGADXQueryDeviceState state;
411 SVGA3dDXQueryFlags flags;
412 SVGAMobId mobid;
413 uint32 offset;
414}
415#include "vmware_pack_end.h"
416SVGACOTableDXQueryEntry;
417
418typedef
419#include "vmware_pack_begin.h"
420struct SVGA3dCmdDXDefineQuery {
421 SVGA3dQueryId queryId;
422 SVGA3dQueryType type;
423 SVGA3dDXQueryFlags flags;
424}
425#include "vmware_pack_end.h"
426SVGA3dCmdDXDefineQuery; /* SVGA_3D_CMD_DX_DEFINE_QUERY */
427
428typedef
429#include "vmware_pack_begin.h"
430struct SVGA3dCmdDXDestroyQuery {
431 SVGA3dQueryId queryId;
432}
433#include "vmware_pack_end.h"
434SVGA3dCmdDXDestroyQuery; /* SVGA_3D_CMD_DX_DESTROY_QUERY */
435
436typedef
437#include "vmware_pack_begin.h"
438struct SVGA3dCmdDXBindQuery {
439 SVGA3dQueryId queryId;
440 SVGAMobId mobid;
441}
442#include "vmware_pack_end.h"
443SVGA3dCmdDXBindQuery; /* SVGA_3D_CMD_DX_BIND_QUERY */
444
445typedef
446#include "vmware_pack_begin.h"
447struct SVGA3dCmdDXSetQueryOffset {
448 SVGA3dQueryId queryId;
449 uint32 mobOffset;
450}
451#include "vmware_pack_end.h"
452SVGA3dCmdDXSetQueryOffset; /* SVGA_3D_CMD_DX_SET_QUERY_OFFSET */
453
454typedef
455#include "vmware_pack_begin.h"
456struct SVGA3dCmdDXBeginQuery {
457 SVGA3dQueryId queryId;
458}
459#include "vmware_pack_end.h"
460SVGA3dCmdDXBeginQuery; /* SVGA_3D_CMD_DX_QUERY_BEGIN */
461
462typedef
463#include "vmware_pack_begin.h"
464struct SVGA3dCmdDXEndQuery {
465 SVGA3dQueryId queryId;
466}
467#include "vmware_pack_end.h"
468SVGA3dCmdDXEndQuery; /* SVGA_3D_CMD_DX_QUERY_END */
469
470typedef
471#include "vmware_pack_begin.h"
472struct SVGA3dCmdDXReadbackQuery {
473 SVGA3dQueryId queryId;
474}
475#include "vmware_pack_end.h"
476SVGA3dCmdDXReadbackQuery; /* SVGA_3D_CMD_DX_READBACK_QUERY */
477
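The query commands above form a lifecycle: define, bind to a mob, set an offset, begin/end, then readback. A hedged sketch of the define step; submit_cmd() is a hypothetical stand-in for the driver's submission path, and SVGA3D_QUERYTYPE_OCCLUSION is assumed to be a valid SVGA3dQueryType value from the types header:

extern void submit_cmd(uint32 cmd_id, const void *body, uint32 len); /* hypothetical */

static void define_occlusion_query(SVGA3dQueryId qid)
{
	SVGA3dCmdDXDefineQuery body;

	body.queryId = qid;
	body.type = SVGA3D_QUERYTYPE_OCCLUSION; /* assumed type constant */
	body.flags = 0;                         /* not a predicate hint */
	submit_cmd(SVGA_3D_CMD_DX_DEFINE_QUERY, &body, sizeof(body));
}
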
478typedef
479#include "vmware_pack_begin.h"
480struct SVGA3dCmdDXMoveQuery {
481 SVGA3dQueryId queryId;
482 SVGAMobId mobid;
483 uint32 mobOffset;
484}
485#include "vmware_pack_end.h"
486SVGA3dCmdDXMoveQuery; /* SVGA_3D_CMD_DX_MOVE_QUERY */
487
488typedef
489#include "vmware_pack_begin.h"
490struct SVGA3dCmdDXBindAllQuery {
491 uint32 cid;
492 SVGAMobId mobid;
493}
494#include "vmware_pack_end.h"
495SVGA3dCmdDXBindAllQuery; /* SVGA_3D_CMD_DX_BIND_ALL_QUERY */
496
497typedef
498#include "vmware_pack_begin.h"
499struct SVGA3dCmdDXReadbackAllQuery {
500 uint32 cid;
501}
502#include "vmware_pack_end.h"
503SVGA3dCmdDXReadbackAllQuery; /* SVGA_3D_CMD_DX_READBACK_ALL_QUERY */
504
505typedef
506#include "vmware_pack_begin.h"
507struct SVGA3dCmdDXSetPredication {
508 SVGA3dQueryId queryId;
509 uint32 predicateValue;
510}
511#include "vmware_pack_end.h"
512SVGA3dCmdDXSetPredication; /* SVGA_3D_CMD_DX_SET_PREDICATION */
513
514typedef
515#include "vmware_pack_begin.h"
516struct SVGA3dDXSOState {
517 uint32 offset; /* Starting offset */
518 uint32 intOffset; /* Internal offset */
519 uint32 vertexCount; /* Vertices written */
520 uint32 sizeInBytes; /* Max bytes to write */
521}
522#include "vmware_pack_end.h"
523SVGA3dDXSOState;
524
525/* Set the offset field to this value to append SO values to the buffer */
526#define SVGA3D_DX_SO_OFFSET_APPEND ((uint32) ~0u)
527
528typedef
529#include "vmware_pack_begin.h"
530struct SVGA3dSoTarget {
531 SVGA3dSurfaceId sid;
532 uint32 offset;
533 uint32 sizeInBytes;
534}
535#include "vmware_pack_end.h"
536SVGA3dSoTarget;
537
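A minimal sketch of an SO target that keeps appending to its buffer via the sentinel above; the parameter values are placeholders:

static void init_append_so_target(SVGA3dSoTarget *tgt,
				  SVGA3dSurfaceId so_buffer_sid,
				  uint32 buf_size)
{
	tgt->sid = so_buffer_sid;
	tgt->offset = SVGA3D_DX_SO_OFFSET_APPEND; /* resume where SO left off */
	tgt->sizeInBytes = buf_size;
}
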
538typedef
539#include "vmware_pack_begin.h"
540struct SVGA3dCmdDXSetSOTargets {
541 uint32 pad0;
542 /* Followed by a variable number of SVGA3dSoTarget's. */
543}
544#include "vmware_pack_end.h"
545SVGA3dCmdDXSetSOTargets; /* SVGA_3D_CMD_DX_SET_SOTARGETS */
546
547typedef
548#include "vmware_pack_begin.h"
549struct SVGA3dViewport
550{
551 float x;
552 float y;
553 float width;
554 float height;
555 float minDepth;
556 float maxDepth;
557}
558#include "vmware_pack_end.h"
559SVGA3dViewport;
560
561typedef
562#include "vmware_pack_begin.h"
563struct SVGA3dCmdDXSetViewports {
564 uint32 pad0;
565 /* Followed by a variable number of SVGA3dViewport's. */
566}
567#include "vmware_pack_end.h"
568SVGA3dCmdDXSetViewports; /* SVGA_3D_CMD_DX_SET_VIEWPORTS */
569
570#define SVGA3D_DX_MAX_VIEWPORTS 16
571
572typedef
573#include "vmware_pack_begin.h"
574struct SVGA3dCmdDXSetScissorRects {
575 uint32 pad0;
576 /* Followed by a variable number of SVGASignedRect's. */
577}
578#include "vmware_pack_end.h"
579SVGA3dCmdDXSetScissorRects; /* SVGA_3D_CMD_DX_SET_SCISSORRECTS */
580
581#define SVGA3D_DX_MAX_SCISSORRECTS 16
582
583typedef
584#include "vmware_pack_begin.h"
585struct SVGA3dCmdDXClearRenderTargetView {
586 SVGA3dRenderTargetViewId renderTargetViewId;
587 SVGA3dRGBAFloat rgba;
588}
589#include "vmware_pack_end.h"
590SVGA3dCmdDXClearRenderTargetView; /* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW */
591
592typedef
593#include "vmware_pack_begin.h"
594struct SVGA3dCmdDXClearDepthStencilView {
595 uint16 flags;
596 uint16 stencil;
597 SVGA3dDepthStencilViewId depthStencilViewId;
598 float depth;
599}
600#include "vmware_pack_end.h"
601SVGA3dCmdDXClearDepthStencilView; /* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW */
602
603typedef
604#include "vmware_pack_begin.h"
605struct SVGA3dCmdDXPredCopyRegion {
606 SVGA3dSurfaceId dstSid;
607 uint32 dstSubResource;
608 SVGA3dSurfaceId srcSid;
609 uint32 srcSubResource;
610 SVGA3dCopyBox box;
611}
612#include "vmware_pack_end.h"
613SVGA3dCmdDXPredCopyRegion;
614/* SVGA_3D_CMD_DX_PRED_COPY_REGION */
615
616typedef
617#include "vmware_pack_begin.h"
618struct SVGA3dCmdDXPredCopy {
619 SVGA3dSurfaceId dstSid;
620 SVGA3dSurfaceId srcSid;
621}
622#include "vmware_pack_end.h"
623SVGA3dCmdDXPredCopy; /* SVGA_3D_CMD_DX_PRED_COPY */
624
625typedef
626#include "vmware_pack_begin.h"
627struct SVGA3dCmdDXBufferCopy {
628 SVGA3dSurfaceId dest;
629 SVGA3dSurfaceId src;
630 uint32 destX;
631 uint32 srcX;
632 uint32 width;
633}
634#include "vmware_pack_end.h"
635SVGA3dCmdDXBufferCopy;
636/* SVGA_3D_CMD_DX_BUFFER_COPY */
637
638typedef uint32 SVGA3dDXStretchBltMode;
639#define SVGADX_STRETCHBLT_LINEAR (1 << 0)
640#define SVGADX_STRETCHBLT_FORCE_SRC_SRGB (1 << 1)
641
642typedef
643#include "vmware_pack_begin.h"
644struct SVGA3dCmdDXStretchBlt {
645 SVGA3dSurfaceId srcSid;
646 uint32 srcSubResource;
647 SVGA3dSurfaceId dstSid;
648 uint32 destSubResource;
649 SVGA3dBox boxSrc;
650 SVGA3dBox boxDest;
651 SVGA3dDXStretchBltMode mode;
652}
653#include "vmware_pack_end.h"
654SVGA3dCmdDXStretchBlt; /* SVGA_3D_CMD_DX_STRETCHBLT */
655
656typedef
657#include "vmware_pack_begin.h"
658struct SVGA3dCmdDXGenMips {
659 SVGA3dShaderResourceViewId shaderResourceViewId;
660}
661#include "vmware_pack_end.h"
662SVGA3dCmdDXGenMips; /* SVGA_3D_CMD_DX_GENMIPS */
663
664/*
665 * Defines a resource/DX surface. Resources share the surfaceId namespace.
666 *
667 */
668typedef
669#include "vmware_pack_begin.h"
670struct SVGA3dCmdDefineGBSurface_v2 {
671 uint32 sid;
672 SVGA3dSurfaceFlags surfaceFlags;
673 SVGA3dSurfaceFormat format;
674 uint32 numMipLevels;
675 uint32 multisampleCount;
676 SVGA3dTextureFilter autogenFilter;
677 SVGA3dSize size;
678 uint32 arraySize;
679 uint32 pad;
680}
681#include "vmware_pack_end.h"
682SVGA3dCmdDefineGBSurface_v2; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
683
684/*
685 * Update a sub-resource in a guest-backed resource.
686 * (Inform the device that the guest-contents have been updated.)
687 */
688typedef
689#include "vmware_pack_begin.h"
690struct SVGA3dCmdDXUpdateSubResource {
691 SVGA3dSurfaceId sid;
692 uint32 subResource;
693 SVGA3dBox box;
694}
695#include "vmware_pack_end.h"
696SVGA3dCmdDXUpdateSubResource; /* SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE */
697
698/*
699 * Readback a subresource in a guest-backed resource.
700 * (Request the device to flush the dirty contents into the guest.)
701 */
702typedef
703#include "vmware_pack_begin.h"
704struct SVGA3dCmdDXReadbackSubResource {
705 SVGA3dSurfaceId sid;
706 uint32 subResource;
707}
708#include "vmware_pack_end.h"
709SVGA3dCmdDXReadbackSubResource; /* SVGA_3D_CMD_DX_READBACK_SUBRESOURCE */
710
711/*
712 * Invalidate an image in a guest-backed surface.
713 * (Notify the device that the contents can be lost.)
714 */
715typedef
716#include "vmware_pack_begin.h"
717struct SVGA3dCmdDXInvalidateSubResource {
718 SVGA3dSurfaceId sid;
719 uint32 subResource;
720}
721#include "vmware_pack_end.h"
722SVGA3dCmdDXInvalidateSubResource; /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */
723
724
725/*
726 * Raw byte-wise transfer from a buffer surface into another surface,
727 * bounded by the requested box.
728 */
729typedef
730#include "vmware_pack_begin.h"
731struct SVGA3dCmdDXTransferFromBuffer {
732 SVGA3dSurfaceId srcSid;
733 uint32 srcOffset;
734 uint32 srcPitch;
735 uint32 srcSlicePitch;
736 SVGA3dSurfaceId destSid;
737 uint32 destSubResource;
738 SVGA3dBox destBox;
739}
740#include "vmware_pack_end.h"
741SVGA3dCmdDXTransferFromBuffer; /* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER */
742
743
744/*
745 * Raw byte-wise transfer from a buffer surface into another surface,
746 * bounded by the requested box. Supported if SVGA3D_DEVCAP_DXCONTEXT is set.
747 * The context is implied from the command buffer header.
748 */
749typedef
750#include "vmware_pack_begin.h"
751struct SVGA3dCmdDXPredTransferFromBuffer {
752 SVGA3dSurfaceId srcSid;
753 uint32 srcOffset;
754 uint32 srcPitch;
755 uint32 srcSlicePitch;
756 SVGA3dSurfaceId destSid;
757 uint32 destSubResource;
758 SVGA3dBox destBox;
759}
760#include "vmware_pack_end.h"
761SVGA3dCmdDXPredTransferFromBuffer;
762/* SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER */
763
764
765typedef
766#include "vmware_pack_begin.h"
767struct SVGA3dCmdDXSurfaceCopyAndReadback {
768 SVGA3dSurfaceId srcSid;
769 SVGA3dSurfaceId destSid;
770 SVGA3dCopyBox box;
771}
772#include "vmware_pack_end.h"
773SVGA3dCmdDXSurfaceCopyAndReadback;
774/* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */
775
776
777typedef
778#include "vmware_pack_begin.h"
779struct {
780 union {
781 struct {
782 uint32 firstElement;
783 uint32 numElements;
784 uint32 pad0;
785 uint32 pad1;
786 } buffer;
787 struct {
788 uint32 mostDetailedMip;
789 uint32 firstArraySlice;
790 uint32 mipLevels;
791 uint32 arraySize;
792 } tex;
793 struct {
794 uint32 firstElement;
795 uint32 numElements;
796 uint32 flags;
797 uint32 pad0;
798 } bufferex;
799 };
800}
801#include "vmware_pack_end.h"
802SVGA3dShaderResourceViewDesc;
803
804typedef
805#include "vmware_pack_begin.h"
806struct {
807 SVGA3dSurfaceId sid;
808 SVGA3dSurfaceFormat format;
809 SVGA3dResourceType resourceDimension;
810 SVGA3dShaderResourceViewDesc desc;
811 uint32 pad;
812}
813#include "vmware_pack_end.h"
814SVGACOTableDXSRViewEntry;
815
816typedef
817#include "vmware_pack_begin.h"
818struct SVGA3dCmdDXDefineShaderResourceView {
819 SVGA3dShaderResourceViewId shaderResourceViewId;
820
821 SVGA3dSurfaceId sid;
822 SVGA3dSurfaceFormat format;
823 SVGA3dResourceType resourceDimension;
824
825 SVGA3dShaderResourceViewDesc desc;
826}
827#include "vmware_pack_end.h"
828SVGA3dCmdDXDefineShaderResourceView;
829/* SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW */
830
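A hedged sketch of describing a plain 2D-texture view with this command; SVGA3D_R8G8B8A8_UNORM is assumed to be a valid SVGA3dSurfaceFormat value, and the parameters are placeholders:

static void init_tex2d_srv(SVGA3dCmdDXDefineShaderResourceView *cmd,
			   SVGA3dShaderResourceViewId srv_id,
			   SVGA3dSurfaceId texture_sid, uint32 num_mips)
{
	cmd->shaderResourceViewId = srv_id;
	cmd->sid = texture_sid;
	cmd->format = SVGA3D_R8G8B8A8_UNORM; /* assumed format constant */
	cmd->resourceDimension = SVGA3D_RESOURCE_TEXTURE2D;
	cmd->desc.tex.mostDetailedMip = 0;
	cmd->desc.tex.firstArraySlice = 0;
	cmd->desc.tex.mipLevels = num_mips;
	cmd->desc.tex.arraySize = 1;         /* not a texture array */
}
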
831typedef
832#include "vmware_pack_begin.h"
833struct SVGA3dCmdDXDestroyShaderResourceView {
834 SVGA3dShaderResourceViewId shaderResourceViewId;
835}
836#include "vmware_pack_end.h"
837SVGA3dCmdDXDestroyShaderResourceView;
838/* SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW */
839
840typedef
841#include "vmware_pack_begin.h"
842struct SVGA3dRenderTargetViewDesc {
843 union {
844 struct {
845 uint32 firstElement;
846 uint32 numElements;
847 } buffer;
848 struct {
849 uint32 mipSlice;
850 uint32 firstArraySlice;
851 uint32 arraySize;
852 } tex; /* 1d, 2d, cube */
853 struct {
854 uint32 mipSlice;
855 uint32 firstW;
856 uint32 wSize;
857 } tex3D;
858 };
859}
860#include "vmware_pack_end.h"
861SVGA3dRenderTargetViewDesc;
862
863typedef
864#include "vmware_pack_begin.h"
865struct {
866 SVGA3dSurfaceId sid;
867 SVGA3dSurfaceFormat format;
868 SVGA3dResourceType resourceDimension;
869 SVGA3dRenderTargetViewDesc desc;
870 uint32 pad[2];
871}
872#include "vmware_pack_end.h"
873SVGACOTableDXRTViewEntry;
874
875typedef
876#include "vmware_pack_begin.h"
877struct SVGA3dCmdDXDefineRenderTargetView {
878 SVGA3dRenderTargetViewId renderTargetViewId;
879
880 SVGA3dSurfaceId sid;
881 SVGA3dSurfaceFormat format;
882 SVGA3dResourceType resourceDimension;
883
884 SVGA3dRenderTargetViewDesc desc;
885}
886#include "vmware_pack_end.h"
887SVGA3dCmdDXDefineRenderTargetView;
888/* SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW */
889
890typedef
891#include "vmware_pack_begin.h"
892struct SVGA3dCmdDXDestroyRenderTargetView {
893 SVGA3dRenderTargetViewId renderTargetViewId;
894}
895#include "vmware_pack_end.h"
896SVGA3dCmdDXDestroyRenderTargetView;
897/* SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW */
898
899/*
900 */
901#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_DEPTH 0x01
902#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_STENCIL 0x02
903#define SVGA3D_DXDSVIEW_CREATE_FLAG_MASK 0x03
904typedef uint8 SVGA3DCreateDSViewFlags;
905
906typedef
907#include "vmware_pack_begin.h"
908struct {
909 SVGA3dSurfaceId sid;
910 SVGA3dSurfaceFormat format;
911 SVGA3dResourceType resourceDimension;
912 uint32 mipSlice;
913 uint32 firstArraySlice;
914 uint32 arraySize;
915 SVGA3DCreateDSViewFlags flags;
916 uint8 pad0;
917 uint16 pad1;
918 uint32 pad2;
919}
920#include "vmware_pack_end.h"
921SVGACOTableDXDSViewEntry;
922
923typedef
924#include "vmware_pack_begin.h"
925struct SVGA3dCmdDXDefineDepthStencilView {
926 SVGA3dDepthStencilViewId depthStencilViewId;
927
928 SVGA3dSurfaceId sid;
929 SVGA3dSurfaceFormat format;
930 SVGA3dResourceType resourceDimension;
931 uint32 mipSlice;
932 uint32 firstArraySlice;
933 uint32 arraySize;
934 SVGA3DCreateDSViewFlags flags;
935 uint8 pad0;
936 uint16 pad1;
937}
938#include "vmware_pack_end.h"
939SVGA3dCmdDXDefineDepthStencilView;
940/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW */
941
942typedef
943#include "vmware_pack_begin.h"
944struct SVGA3dCmdDXDestroyDepthStencilView {
945 SVGA3dDepthStencilViewId depthStencilViewId;
946}
947#include "vmware_pack_end.h"
948SVGA3dCmdDXDestroyDepthStencilView;
949/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW */
950
951typedef
952#include "vmware_pack_begin.h"
953struct SVGA3dInputElementDesc {
954 uint32 inputSlot;
955 uint32 alignedByteOffset;
956 SVGA3dSurfaceFormat format;
957 SVGA3dInputClassification inputSlotClass;
958 uint32 instanceDataStepRate;
959 uint32 inputRegister;
960}
961#include "vmware_pack_end.h"
962SVGA3dInputElementDesc;
963
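A minimal sketch of one per-vertex position element at the start of slot 0; SVGA3D_R32G32B32_FLOAT is assumed to be a valid SVGA3dSurfaceFormat value:

static void init_position_element(SVGA3dInputElementDesc *elem)
{
	elem->inputSlot = 0;
	elem->alignedByteOffset = 0;
	elem->format = SVGA3D_R32G32B32_FLOAT;       /* assumed format constant */
	elem->inputSlotClass = SVGA3D_INPUT_PER_VERTEX_DATA;
	elem->instanceDataStepRate = 0;              /* per-vertex data, so zero */
	elem->inputRegister = 0;
}
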
964typedef
965#include "vmware_pack_begin.h"
966struct {
967 /*
968 * XXX: How many of these can there be?
969 */
970 uint32 elid;
971 uint32 numDescs;
972 SVGA3dInputElementDesc desc[32];
973 uint32 pad[62];
974}
975#include "vmware_pack_end.h"
976SVGACOTableDXElementLayoutEntry;
977
978typedef
979#include "vmware_pack_begin.h"
980struct SVGA3dCmdDXDefineElementLayout {
981 SVGA3dElementLayoutId elementLayoutId;
982 /* Followed by a variable number of SVGA3dInputElementDesc's. */
983}
984#include "vmware_pack_end.h"
985SVGA3dCmdDXDefineElementLayout;
986/* SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT */
987
988typedef
989#include "vmware_pack_begin.h"
990struct SVGA3dCmdDXDestroyElementLayout {
991 SVGA3dElementLayoutId elementLayoutId;
992}
993#include "vmware_pack_end.h"
994SVGA3dCmdDXDestroyElementLayout;
995/* SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT */
996
997
998#define SVGA3D_DX_MAX_RENDER_TARGETS 8
999
1000typedef
1001#include "vmware_pack_begin.h"
1002struct SVGA3dDXBlendStatePerRT {
1003 uint8 blendEnable;
1004 uint8 srcBlend;
1005 uint8 destBlend;
1006 uint8 blendOp;
1007 uint8 srcBlendAlpha;
1008 uint8 destBlendAlpha;
1009 uint8 blendOpAlpha;
1010 uint8 renderTargetWriteMask;
1011 uint8 logicOpEnable;
1012 uint8 logicOp;
1013 uint16 pad0;
1014}
1015#include "vmware_pack_end.h"
1016SVGA3dDXBlendStatePerRT;
1017
1018typedef
1019#include "vmware_pack_begin.h"
1020struct {
1021 uint8 alphaToCoverageEnable;
1022 uint8 independentBlendEnable;
1023 uint16 pad0;
1024 SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
1025 uint32 pad1[7];
1026}
1027#include "vmware_pack_end.h"
1028SVGACOTableDXBlendStateEntry;
1029
1030/*
1031 */
1032typedef
1033#include "vmware_pack_begin.h"
1034struct SVGA3dCmdDXDefineBlendState {
1035 SVGA3dBlendStateId blendId;
1036 uint8 alphaToCoverageEnable;
1037 uint8 independentBlendEnable;
1038 uint16 pad0;
1039 SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
1040}
1041#include "vmware_pack_end.h"
1042SVGA3dCmdDXDefineBlendState; /* SVGA_3D_CMD_DX_DEFINE_BLEND_STATE */
1043
1044typedef
1045#include "vmware_pack_begin.h"
1046struct SVGA3dCmdDXDestroyBlendState {
1047 SVGA3dBlendStateId blendId;
1048}
1049#include "vmware_pack_end.h"
1050SVGA3dCmdDXDestroyBlendState; /* SVGA_3D_CMD_DX_DESTROY_BLEND_STATE */
1051
1052typedef
1053#include "vmware_pack_begin.h"
1054struct {
1055 uint8 depthEnable;
1056 SVGA3dDepthWriteMask depthWriteMask;
1057 SVGA3dComparisonFunc depthFunc;
1058 uint8 stencilEnable;
1059 uint8 frontEnable;
1060 uint8 backEnable;
1061 uint8 stencilReadMask;
1062 uint8 stencilWriteMask;
1063
1064 uint8 frontStencilFailOp;
1065 uint8 frontStencilDepthFailOp;
1066 uint8 frontStencilPassOp;
1067 SVGA3dComparisonFunc frontStencilFunc;
1068
1069 uint8 backStencilFailOp;
1070 uint8 backStencilDepthFailOp;
1071 uint8 backStencilPassOp;
1072 SVGA3dComparisonFunc backStencilFunc;
1073}
1074#include "vmware_pack_end.h"
1075SVGACOTableDXDepthStencilEntry;
1076
1077/*
1078 */
1079typedef
1080#include "vmware_pack_begin.h"
1081struct SVGA3dCmdDXDefineDepthStencilState {
1082 SVGA3dDepthStencilStateId depthStencilId;
1083
1084 uint8 depthEnable;
1085 SVGA3dDepthWriteMask depthWriteMask;
1086 SVGA3dComparisonFunc depthFunc;
1087 uint8 stencilEnable;
1088 uint8 frontEnable;
1089 uint8 backEnable;
1090 uint8 stencilReadMask;
1091 uint8 stencilWriteMask;
1092
1093 uint8 frontStencilFailOp;
1094 uint8 frontStencilDepthFailOp;
1095 uint8 frontStencilPassOp;
1096 SVGA3dComparisonFunc frontStencilFunc;
1097
1098 uint8 backStencilFailOp;
1099 uint8 backStencilDepthFailOp;
1100 uint8 backStencilPassOp;
1101 SVGA3dComparisonFunc backStencilFunc;
1102}
1103#include "vmware_pack_end.h"
1104SVGA3dCmdDXDefineDepthStencilState;
1105/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE */
1106
1107typedef
1108#include "vmware_pack_begin.h"
1109struct SVGA3dCmdDXDestroyDepthStencilState {
1110 SVGA3dDepthStencilStateId depthStencilId;
1111}
1112#include "vmware_pack_end.h"
1113SVGA3dCmdDXDestroyDepthStencilState;
1114/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE */
1115
1116typedef
1117#include "vmware_pack_begin.h"
1118struct {
1119 uint8 fillMode;
1120 SVGA3dCullMode cullMode;
1121 uint8 frontCounterClockwise;
1122 uint8 provokingVertexLast;
1123 int32 depthBias;
1124 float depthBiasClamp;
1125 float slopeScaledDepthBias;
1126 uint8 depthClipEnable;
1127 uint8 scissorEnable;
1128 uint8 multisampleEnable;
1129 uint8 antialiasedLineEnable;
1130 float lineWidth;
1131 uint8 lineStippleEnable;
1132 uint8 lineStippleFactor;
1133 uint16 lineStipplePattern;
1134 uint32 forcedSampleCount;
1135}
1136#include "vmware_pack_end.h"
1137SVGACOTableDXRasterizerStateEntry;
1138
1139/*
1140 */
1141typedef
1142#include "vmware_pack_begin.h"
1143struct SVGA3dCmdDXDefineRasterizerState {
1144 SVGA3dRasterizerStateId rasterizerId;
1145
1146 uint8 fillMode;
1147 SVGA3dCullMode cullMode;
1148 uint8 frontCounterClockwise;
1149 uint8 provokingVertexLast;
1150 int32 depthBias;
1151 float depthBiasClamp;
1152 float slopeScaledDepthBias;
1153 uint8 depthClipEnable;
1154 uint8 scissorEnable;
1155 uint8 multisampleEnable;
1156 uint8 antialiasedLineEnable;
1157 float lineWidth;
1158 uint8 lineStippleEnable;
1159 uint8 lineStippleFactor;
1160 uint16 lineStipplePattern;
1161}
1162#include "vmware_pack_end.h"
1163SVGA3dCmdDXDefineRasterizerState;
1164/* SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE */
1165
1166typedef
1167#include "vmware_pack_begin.h"
1168struct SVGA3dCmdDXDestroyRasterizerState {
1169 SVGA3dRasterizerStateId rasterizerId;
1170}
1171#include "vmware_pack_end.h"
1172SVGA3dCmdDXDestroyRasterizerState;
1173/* SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE */
1174
1175typedef
1176#include "vmware_pack_begin.h"
1177struct {
1178 SVGA3dFilter filter;
1179 uint8 addressU;
1180 uint8 addressV;
1181 uint8 addressW;
1182 uint8 pad0;
1183 float mipLODBias;
1184 uint8 maxAnisotropy;
1185 SVGA3dComparisonFunc comparisonFunc;
1186 uint16 pad1;
1187 SVGA3dRGBAFloat borderColor;
1188 float minLOD;
1189 float maxLOD;
1190 uint32 pad2[6];
1191}
1192#include "vmware_pack_end.h"
1193SVGACOTableDXSamplerEntry;
1194
1195/*
1196 */
1197typedef
1198#include "vmware_pack_begin.h"
1199struct SVGA3dCmdDXDefineSamplerState {
1200 SVGA3dSamplerId samplerId;
1201 SVGA3dFilter filter;
1202 uint8 addressU;
1203 uint8 addressV;
1204 uint8 addressW;
1205 uint8 pad0;
1206 float mipLODBias;
1207 uint8 maxAnisotropy;
1208 SVGA3dComparisonFunc comparisonFunc;
1209 uint16 pad1;
1210 SVGA3dRGBAFloat borderColor;
1211 float minLOD;
1212 float maxLOD;
1213}
1214#include "vmware_pack_end.h"
1215SVGA3dCmdDXDefineSamplerState; /* SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE */
1216
1217typedef
1218#include "vmware_pack_begin.h"
1219struct SVGA3dCmdDXDestroySamplerState {
1220 SVGA3dSamplerId samplerId;
1221}
1222#include "vmware_pack_end.h"
1223SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
1224
1225/*
1226 */
1227typedef
1228#include "vmware_pack_begin.h"
1229struct SVGA3dSignatureEntry {
1230 uint8 systemValue;
1231 uint8 reg; /* register is a reserved word */
1232 uint16 mask;
1233 uint8 registerComponentType;
1234 uint8 minPrecision;
1235 uint16 pad0;
1236}
1237#include "vmware_pack_end.h"
1238SVGA3dSignatureEntry;
1239
1240typedef
1241#include "vmware_pack_begin.h"
1242struct SVGA3dCmdDXDefineShader {
1243 SVGA3dShaderId shaderId;
1244 SVGA3dShaderType type;
1245 uint32 sizeInBytes; /* Number of bytes of shader text. */
1246}
1247#include "vmware_pack_end.h"
1248SVGA3dCmdDXDefineShader; /* SVGA_3D_CMD_DX_DEFINE_SHADER */
1249
1250typedef
1251#include "vmware_pack_begin.h"
1252struct SVGACOTableDXShaderEntry {
1253 SVGA3dShaderType type;
1254 uint32 sizeInBytes;
1255 uint32 offsetInBytes;
1256 SVGAMobId mobid;
1257 uint32 numInputSignatureEntries;
1258 uint32 numOutputSignatureEntries;
1259
1260 uint32 numPatchConstantSignatureEntries;
1261
1262 uint32 pad;
1263}
1264#include "vmware_pack_end.h"
1265SVGACOTableDXShaderEntry;
1266
1267typedef
1268#include "vmware_pack_begin.h"
1269struct SVGA3dCmdDXDestroyShader {
1270 SVGA3dShaderId shaderId;
1271}
1272#include "vmware_pack_end.h"
1273SVGA3dCmdDXDestroyShader; /* SVGA_3D_CMD_DX_DESTROY_SHADER */
1274
1275typedef
1276#include "vmware_pack_begin.h"
1277struct SVGA3dCmdDXBindShader {
1278 uint32 cid;
1279 uint32 shid;
1280 SVGAMobId mobid;
1281 uint32 offsetInBytes;
1282}
1283#include "vmware_pack_end.h"
1284SVGA3dCmdDXBindShader; /* SVGA_3D_CMD_DX_BIND_SHADER */
1285
1286/*
1287 * The maximum number of streamout decl's in each streamout entry.
1288 */
1289#define SVGA3D_MAX_STREAMOUT_DECLS 64
1290
1291typedef
1292#include "vmware_pack_begin.h"
1293struct SVGA3dStreamOutputDeclarationEntry {
1294 uint32 outputSlot;
1295 uint32 registerIndex;
1296 uint8 registerMask;
1297 uint8 pad0;
1298 uint16 pad1;
1299 uint32 stream;
1300}
1301#include "vmware_pack_end.h"
1302SVGA3dStreamOutputDeclarationEntry;
1303
1304typedef
1305#include "vmware_pack_begin.h"
1306struct SVGACOTableDXStreamOutputEntry {
1307 uint32 numOutputStreamEntries;
1308 SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
1309 uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
1310 uint32 rasterizedStream;
1311 uint32 pad[250];
1312}
1313#include "vmware_pack_end.h"
1314SVGACOTableDXStreamOutputEntry;
1315
1316typedef
1317#include "vmware_pack_begin.h"
1318struct SVGA3dCmdDXDefineStreamOutput {
1319 SVGA3dStreamOutputId soid;
1320 uint32 numOutputStreamEntries;
1321 SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
1322 uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
1323 uint32 rasterizedStream;
1324}
1325#include "vmware_pack_end.h"
1326SVGA3dCmdDXDefineStreamOutput; /* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT */
1327
1328typedef
1329#include "vmware_pack_begin.h"
1330struct SVGA3dCmdDXDestroyStreamOutput {
1331 SVGA3dStreamOutputId soid;
1332}
1333#include "vmware_pack_end.h"
1334SVGA3dCmdDXDestroyStreamOutput; /* SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT */
1335
1336typedef
1337#include "vmware_pack_begin.h"
1338struct SVGA3dCmdDXSetStreamOutput {
1339 SVGA3dStreamOutputId soid;
1340}
1341#include "vmware_pack_end.h"
1342SVGA3dCmdDXSetStreamOutput; /* SVGA_3D_CMD_DX_SET_STREAMOUTPUT */
1343
1344typedef
1345#include "vmware_pack_begin.h"
1346struct {
1347 uint64 value;
1348 uint32 mobId;
1349 uint32 mobOffset;
1350}
1351#include "vmware_pack_end.h"
1352SVGA3dCmdDXMobFence64; /* SVGA_3D_CMD_DX_MOB_FENCE_64 */
1353
1354/*
1355 * SVGA3dCmdDXSetCOTable --
1356 *
1357 * This command allows the guest to bind a mob to a context-object table.
1358 */
1359
1360typedef
1361#include "vmware_pack_begin.h"
1362struct SVGA3dCmdDXSetCOTable {
1363 uint32 cid;
1364 uint32 mobid;
1365 SVGACOTableType type;
1366 uint32 validSizeInBytes;
1367}
1368#include "vmware_pack_end.h"
1369SVGA3dCmdDXSetCOTable; /* SVGA_3D_CMD_DX_SET_COTABLE */
1370
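A hedged sketch of binding a fresh mob as the render-target-view table; SVGA_COTABLE_RTVIEW is assumed to be one of the SVGACOTableType values from the types header:

static void init_set_rtview_cotable(SVGA3dCmdDXSetCOTable *cmd, uint32 cid,
				    uint32 mobid)
{
	cmd->cid = cid;
	cmd->mobid = mobid;
	cmd->type = SVGA_COTABLE_RTVIEW; /* assumed COTable type constant */
	cmd->validSizeInBytes = 0;       /* no entries are valid yet */
}
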
1371typedef
1372#include "vmware_pack_begin.h"
1373struct SVGA3dCmdDXReadbackCOTable {
1374 uint32 cid;
1375 SVGACOTableType type;
1376}
1377#include "vmware_pack_end.h"
1378SVGA3dCmdDXReadbackCOTable; /* SVGA_3D_CMD_DX_READBACK_COTABLE */
1379
1380typedef
1381#include "vmware_pack_begin.h"
1382struct SVGA3dCOTableData {
1383 uint32 mobid;
1384}
1385#include "vmware_pack_end.h"
1386SVGA3dCOTableData;
1387
1388typedef
1389#include "vmware_pack_begin.h"
1390struct SVGA3dBufferBinding {
1391 uint32 bufferId;
1392 uint32 stride;
1393 uint32 offset;
1394}
1395#include "vmware_pack_end.h"
1396SVGA3dBufferBinding;
1397
1398typedef
1399#include "vmware_pack_begin.h"
1400struct SVGA3dConstantBufferBinding {
1401 uint32 sid;
1402 uint32 offsetInBytes;
1403 uint32 sizeInBytes;
1404}
1405#include "vmware_pack_end.h"
1406SVGA3dConstantBufferBinding;
1407
1408typedef
1409#include "vmware_pack_begin.h"
1410struct SVGADXInputAssemblyMobFormat {
1411 uint32 layoutId;
1412 SVGA3dBufferBinding vertexBuffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
1413 uint32 indexBufferSid;
1414 uint32 pad;
1415 uint32 indexBufferOffset;
1416 uint32 indexBufferFormat;
1417 uint32 topology;
1418}
1419#include "vmware_pack_end.h"
1420SVGADXInputAssemblyMobFormat;
1421
1422typedef
1423#include "vmware_pack_begin.h"
1424struct SVGADXContextMobFormat {
1425 SVGADXInputAssemblyMobFormat inputAssembly;
1426
1427 struct {
1428 uint32 blendStateId;
1429 uint32 blendFactor[4];
1430 uint32 sampleMask;
1431 uint32 depthStencilStateId;
1432 uint32 stencilRef;
1433 uint32 rasterizerStateId;
1434 uint32 depthStencilViewId;
1435 uint32 renderTargetViewIds[SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS];
1436 uint32 unorderedAccessViewIds[SVGA3D_MAX_UAVIEWS];
1437 } renderState;
1438
1439 struct {
1440 uint32 targets[SVGA3D_DX_MAX_SOTARGETS];
1441 uint32 soid;
1442 } streamOut;
1443 uint32 pad0[11];
1444
1445 uint8 numViewports;
1446 uint8 numScissorRects;
1447 uint16 pad1[1];
1448
1449 uint32 pad2[3];
1450
1451 SVGA3dViewport viewports[SVGA3D_DX_MAX_VIEWPORTS];
1452 uint32 pad3[32];
1453
1454 SVGASignedRect scissorRects[SVGA3D_DX_MAX_SCISSORRECTS];
1455 uint32 pad4[64];
1456
1457 struct {
1458 uint32 queryID;
1459 uint32 value;
1460 } predication;
1461 uint32 pad5[2];
1462
1463 struct {
1464 uint32 shaderId;
1465 SVGA3dConstantBufferBinding constantBuffers[SVGA3D_DX_MAX_CONSTBUFFERS];
1466 uint32 shaderResources[SVGA3D_DX_MAX_SRVIEWS];
1467 uint32 samplers[SVGA3D_DX_MAX_SAMPLERS];
1468 } shaderState[SVGA3D_NUM_SHADERTYPE];
1469 uint32 pad6[26];
1470
1471 SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
1472
1473 SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
1474 uint32 pad7[381];
1475}
1476#include "vmware_pack_end.h"
1477SVGADXContextMobFormat;
1478
1479typedef
1480#include "vmware_pack_begin.h"
1481struct SVGA3dCmdDXTempSetContext {
1482 uint32 dxcid;
1483}
1484#include "vmware_pack_end.h"
1485SVGA3dCmdDXTempSetContext; /* SVGA_3D_CMD_DX_TEMP_SET_CONTEXT */
1486
1487#endif /* _SVGA3D_DX_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
new file mode 100644
index 000000000000..a1c36877ad55
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -0,0 +1,99 @@
1/**********************************************************
2 * Copyright 2007-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_limits.h --
28 *
29 * SVGA 3d hardware limits
30 */
31
32#ifndef _SVGA3D_LIMITS_H_
33#define _SVGA3D_LIMITS_H_
34
35#define INCLUDE_ALLOW_MODULE
36#define INCLUDE_ALLOW_USERLEVEL
37#define INCLUDE_ALLOW_VMCORE
38
39#include "includeCheck.h"
40
41#define SVGA3D_NUM_CLIPPLANES 6
42#define SVGA3D_MAX_RENDER_TARGETS 8
43#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS (SVGA3D_MAX_RENDER_TARGETS)
44#define SVGA3D_MAX_UAVIEWS 8
45#define SVGA3D_MAX_CONTEXT_IDS 256
46#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
47
48/*
49 * Maximum ID a shader can be assigned on a given context.
50 */
51#define SVGA3D_MAX_SHADERIDS 5000
52/*
53 * Maximum number of shaders of a given type that can be defined
54 * (including all contexts).
55 */
56#define SVGA3D_MAX_SIMULTANEOUS_SHADERS 20000
57
58#define SVGA3D_NUM_TEXTURE_UNITS 32
59#define SVGA3D_NUM_LIGHTS 8
60
61/*
62 * Maximum size in dwords of shader text the SVGA device will allow.
63 * Currently 8 MB.
64 */
65#define SVGA3D_MAX_SHADER_MEMORY (8 * 1024 * 1024 / sizeof(uint32))
66
67#define SVGA3D_MAX_CLIP_PLANES 6
68
69/*
70 * This is the limit on the number of fixed-function texture
71 * transforms and texture coordinates we can support. It does *not*
72 * correspond to the number of texture image units (samplers) we
73 * support!
74 */
75#define SVGA3D_MAX_TEXTURE_COORDS 8
76
77/*
78 * Number of faces in a cubemap.
79 */
80#define SVGA3D_MAX_SURFACE_FACES 6
81
82/*
83 * Maximum number of array indexes in a GB surface (with DX enabled).
84 */
85#define SVGA3D_MAX_SURFACE_ARRAYSIZE 512
86
87/*
88 * The maximum number of vertex arrays we're guaranteed to support in
89 * SVGA_3D_CMD_DRAWPRIMITIVES.
90 */
91#define SVGA3D_MAX_VERTEX_ARRAYS 32
92
93/*
94 * The maximum number of primitive ranges we're guaranteed to support
95 * in SVGA_3D_CMD_DRAWPRIMITIVES.
96 */
97#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
98
99#endif /* _SVGA3D_LIMITS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
new file mode 100644
index 000000000000..b44ce648f592
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
@@ -0,0 +1,50 @@
1/**********************************************************
2 * Copyright 1998-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_reg.h --
28 *
29 * SVGA 3d hardware definitions
30 */
31
32#ifndef _SVGA3D_REG_H_
33#define _SVGA3D_REG_H_
34
35#define INCLUDE_ALLOW_MODULE
36#define INCLUDE_ALLOW_USERLEVEL
37#define INCLUDE_ALLOW_VMCORE
38
39#include "includeCheck.h"
40
41#include "svga_reg.h"
42
43#include "svga3d_types.h"
44#include "svga3d_limits.h"
45#include "svga3d_cmd.h"
46#include "svga3d_dx.h"
47#include "svga3d_devcaps.h"
48
49
50#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
new file mode 100644
index 000000000000..58704f0a4607
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -0,0 +1,1204 @@
1/**************************************************************************
2 *
3 * Copyright © 2008-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifdef __KERNEL__
29
30#include <drm/vmwgfx_drm.h>
31#define surf_size_struct struct drm_vmw_size
32
33#else /* __KERNEL__ */
34
35#ifndef ARRAY_SIZE
36#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
37#endif /* ARRAY_SIZE */
38
39#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
40#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
41#define surf_size_struct SVGA3dSize
42#define u32 uint32
43
44#endif /* __KERNEL__ */
45
46#include "svga3d_reg.h"
47
48/*
49 * enum svga3d_block_desc describes the active data channels in a block.
50 *
51 * There can be at most four active channels in a block:
52 * 1. Blue and bump U are stored in the first channel.
53 * 2. Green, bump V and stencil are stored in the second channel.
54 * 3. Red, bump W, luminance and depth are stored in the third channel.
55 * 4. Alpha and bump Q are stored in the fourth channel.
56 *
57 * Block channels can also be used to store compressed and buffer data:
58 * 1. For compressed formats, only the data channel is used and its size
59 * is equal to that of a singular block in the compression scheme.
60 * 2. For buffer formats, only the data channel is used and its size is
61 * exactly one byte in length.
62 * 3. In each case the bit depth represents the size of a singular block.
63 *
64 * Note: Compressed and IEEE formats do not use the bitMask structure.
65 */
66
67enum svga3d_block_desc {
68 SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
69 SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with blue channel
70 data */
71 SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
72 data */
73 SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
74 U and V */
75 SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
76 data */
77 SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
78 data */
79 SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
80 channel */
81 SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with red channel
82 data */
83 SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
84 data */
85 SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
86 data */
87 SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
88 data */
89 SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
90 SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
91 channel */
92 SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
93 data */
94 SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
95 data */
96 SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
97 data depending on the
98 compression method used */
99 SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
100 floating point
101 representation in
102 all channels */
103 SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
104 data. */
105 SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
106 SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
107 SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
108 SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
109 SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
110 e.g., NV12. */
111 SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
112 Y, U, V, e.g., YV12. */
113
114 SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
115 SVGA3DBLOCKDESC_GREEN,
116 SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
117 SVGA3DBLOCKDESC_BLUE,
118 SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
119 SVGA3DBLOCKDESC_SRGB,
120 SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
121 SVGA3DBLOCKDESC_ALPHA,
122 SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
123 SVGA3DBLOCKDESC_SRGB,
124 SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
125 SVGA3DBLOCKDESC_V,
126 SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
127 SVGA3DBLOCKDESC_LUMINANCE,
128 SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
129 SVGA3DBLOCKDESC_W,
130 SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
131 SVGA3DBLOCKDESC_ALPHA,
132 SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
133 SVGA3DBLOCKDESC_V |
134 SVGA3DBLOCKDESC_W |
135 SVGA3DBLOCKDESC_Q,
136 SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
137 SVGA3DBLOCKDESC_ALPHA,
138 SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
139 SVGA3DBLOCKDESC_IEEE_FP,
140 SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
141 SVGA3DBLOCKDESC_GREEN,
142 SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
143 SVGA3DBLOCKDESC_BLUE,
144 SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
145 SVGA3DBLOCKDESC_ALPHA,
146 SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
147 SVGA3DBLOCKDESC_STENCIL,
148 SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
149 SVGA3DBLOCKDESC_Y,
150 SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
151 SVGA3DBLOCKDESC_Y |
152 SVGA3DBLOCKDESC_U_VIDEO |
153 SVGA3DBLOCKDESC_V_VIDEO,
154 SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
155 SVGA3DBLOCKDESC_EXP,
156 SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
157 SVGA3DBLOCKDESC_SRGB,
158 SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
159 SVGA3DBLOCKDESC_2PLANAR_YUV,
160 SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
161 SVGA3DBLOCKDESC_3PLANAR_YUV,
162};
163
164/*
165 * SVGA3dSurfaceDesc describes the actual pixel data.
166 *
167 * This structure provides the following information:
168 * 1. Block description.
169 * 2. Dimensions of a block in the surface.
170 * 3. Size of block in bytes.
171 * 4. Bit depth of the pixel data.
172 * 5. Channel bit depths and masks (if applicable).
173 */
174struct svga3d_channel_def {
175 union {
176 u8 blue;
177 u8 u;
178 u8 uv_video;
179 u8 u_video;
180 };
181 union {
182 u8 green;
183 u8 v;
184 u8 stencil;
185 u8 v_video;
186 };
187 union {
188 u8 red;
189 u8 w;
190 u8 luminance;
191 u8 y;
192 u8 depth;
193 u8 data;
194 };
195 union {
196 u8 alpha;
197 u8 q;
198 u8 exp;
199 };
200};
201
202struct svga3d_surface_desc {
203 SVGA3dSurfaceFormat format;
204 enum svga3d_block_desc block_desc;
205 surf_size_struct block_size;
206 u32 bytes_per_block;
207 u32 pitch_bytes_per_block;
208
209 u32 total_bit_depth;
210 struct svga3d_channel_def bit_depth;
211 struct svga3d_channel_def bit_offset;
212};
213
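These descriptor fields drive the layout math used throughout this header; a minimal sketch of a row-pitch computation, assuming pitch is blocks-per-row times pitch_bytes_per_block:

static inline u32 svga3d_surface_pitch(const struct svga3d_surface_desc *desc,
				       u32 width)
{
	/* Round partial blocks up: a 5-pixel-wide DXT1 row still spans 2 blocks. */
	u32 blocks_wide = DIV_ROUND_UP(width, desc->block_size.width);

	return blocks_wide * desc->pitch_bytes_per_block;
}
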
214static const struct svga3d_surface_desc svga3d_surface_descs[] = {
215 {SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE,
216 {1, 1, 1}, 0, 0,
217 0, {{0}, {0}, {0}, {0}},
218 {{0}, {0}, {0}, {0}}},
219
220 {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB,
221 {1, 1, 1}, 4, 4,
222 24, {{8}, {8}, {8}, {0}},
223 {{0}, {8}, {16}, {24}}},
224
225 {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA,
226 {1, 1, 1}, 4, 4,
227 32, {{8}, {8}, {8}, {8}},
228 {{0}, {8}, {16}, {24}}},
229
230 {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB,
231 {1, 1, 1}, 2, 2,
232 16, {{5}, {6}, {5}, {0}},
233 {{0}, {5}, {11}, {0}}},
234
235 {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB,
236 {1, 1, 1}, 2, 2,
237 15, {{5}, {5}, {5}, {0}},
238 {{0}, {5}, {10}, {0}}},
239
240 {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA,
241 {1, 1, 1}, 2, 2,
242 16, {{5}, {5}, {5}, {1}},
243 {{0}, {5}, {10}, {15}}},
244
245 {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA,
246 {1, 1, 1}, 2, 2,
247 16, {{4}, {4}, {4}, {4}},
248 {{0}, {4}, {8}, {12}}},
249
250 {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH,
251 {1, 1, 1}, 4, 4,
252 32, {{0}, {0}, {32}, {0}},
253 {{0}, {0}, {0}, {0}}},
254
255 {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH,
256 {1, 1, 1}, 2, 2,
257 16, {{0}, {0}, {16}, {0}},
258 {{0}, {0}, {0}, {0}}},
259
260 {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS,
261 {1, 1, 1}, 4, 4,
262 32, {{0}, {8}, {24}, {0}},
263 {{0}, {24}, {0}, {0}}},
264
265 {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS,
266 {1, 1, 1}, 2, 2,
267 16, {{0}, {1}, {15}, {0}},
268 {{0}, {15}, {0}, {0}}},
269
270 {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_LUMINANCE,
271 {1, 1, 1}, 1, 1,
272 8, {{0}, {0}, {8}, {0}},
273 {{0}, {0}, {0}, {0}}},
274
275 {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA,
276 {1 , 1, 1}, 1, 1,
277 8, {{0}, {0}, {4}, {4}},
278 {{0}, {0}, {0}, {4}}},
279
280 {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_LUMINANCE,
281 {1, 1, 1}, 2, 2,
282 16, {{0}, {0}, {16}, {0}},
283 {{0}, {0}, {0}, {0}}},
284
285 {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA,
286 {1, 1, 1}, 2, 2,
287 16, {{0}, {0}, {8}, {8}},
288 {{0}, {0}, {0}, {8}}},
289
290 {SVGA3D_DXT1, SVGA3DBLOCKDESC_COMPRESSED,
291 {4, 4, 1}, 8, 8,
292 64, {{0}, {0}, {64}, {0}},
293 {{0}, {0}, {0}, {0}}},
294
295 {SVGA3D_DXT2, SVGA3DBLOCKDESC_COMPRESSED,
296 {4, 4, 1}, 16, 16,
297 128, {{0}, {0}, {128}, {0}},
298 {{0}, {0}, {0}, {0}}},
299
300 {SVGA3D_DXT3, SVGA3DBLOCKDESC_COMPRESSED,
301 {4, 4, 1}, 16, 16,
302 128, {{0}, {0}, {128}, {0}},
303 {{0}, {0}, {0}, {0}}},
304
305 {SVGA3D_DXT4, SVGA3DBLOCKDESC_COMPRESSED,
306 {4, 4, 1}, 16, 16,
307 128, {{0}, {0}, {128}, {0}},
308 {{0}, {0}, {0}, {0}}},
309
310 {SVGA3D_DXT5, SVGA3DBLOCKDESC_COMPRESSED,
311 {4, 4, 1}, 16, 16,
312 128, {{0}, {0}, {128}, {0}},
313 {{0}, {0}, {0}, {0}}},
314
315 {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV,
316 {1, 1, 1}, 2, 2,
317 16, {{0}, {0}, {8}, {8}},
318 {{0}, {0}, {0}, {8}}},
319
320 {SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL,
321 {1, 1, 1}, 2, 2,
322 16, {{5}, {5}, {6}, {0}},
323 {{11}, {6}, {0}, {0}}},
324
325 {SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL,
326 {1, 1, 1}, 4, 4,
327 32, {{8}, {8}, {8}, {0}},
328 {{16}, {8}, {0}, {0}}},
329
330 {SVGA3D_BUMPL8V8U8, SVGA3DBLOCKDESC_UVL,
331 {1, 1, 1}, 3, 3,
332 24, {{8}, {8}, {8}, {0}},
333 {{16}, {8}, {0}, {0}}},
334
335 {SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP,
336 {1, 1, 1}, 8, 8,
337 64, {{16}, {16}, {16}, {16}},
338 {{32}, {16}, {0}, {48}}},
339
340 {SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP,
341 {1, 1, 1}, 16, 16,
342 128, {{32}, {32}, {32}, {32}},
343 {{64}, {32}, {0}, {96}}},
344
345 {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA,
346 {1, 1, 1}, 4, 4,
347 32, {{10}, {10}, {10}, {2}},
348 {{0}, {10}, {20}, {30}}},
349
350 {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV,
351 {1, 1, 1}, 2, 2,
352 16, {{8}, {8}, {0}, {0}},
353 {{8}, {0}, {0}, {0}}},
354
355 {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ,
356 {1, 1, 1}, 4, 4,
357 32, {{8}, {8}, {8}, {8}},
358 {{24}, {16}, {8}, {0}}},
359
360 {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UV,
361 {1, 1, 1}, 2, 2,
362 16, {{8}, {8}, {0}, {0}},
363 {{8}, {0}, {0}, {0}}},
364
365 {SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL,
366 {1, 1, 1}, 4, 4,
367 24, {{8}, {8}, {8}, {0}},
368 {{16}, {8}, {0}, {0}}},
369
370 {SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA,
371 {1, 1, 1}, 4, 4,
372 32, {{10}, {10}, {10}, {2}},
373 {{0}, {10}, {20}, {30}}},
374
375 {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_ALPHA,
376 {1, 1, 1}, 1, 1,
377 8, {{0}, {0}, {0}, {8}},
378 {{0}, {0}, {0}, {0}}},
379
380 {SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP,
381 {1, 1, 1}, 2, 2,
382 16, {{0}, {0}, {16}, {0}},
383 {{0}, {0}, {0}, {0}}},
384
385 {SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP,
386 {1, 1, 1}, 4, 4,
387 32, {{0}, {0}, {32}, {0}},
388 {{0}, {0}, {0}, {0}}},
389
390 {SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP,
391 {1, 1, 1}, 4, 4,
392 32, {{0}, {16}, {16}, {0}},
393 {{0}, {16}, {0}, {0}}},
394
395 {SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP,
396 {1, 1, 1}, 8, 8,
397 64, {{0}, {32}, {32}, {0}},
398 {{0}, {32}, {0}, {0}}},
399
400 {SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER,
401 {1, 1, 1}, 1, 1,
402 8, {{0}, {0}, {8}, {0}},
403 {{0}, {0}, {0}, {0}}},
404
405 {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH,
406 {1, 1, 1}, 4, 4,
407 32, {{0}, {0}, {24}, {0}},
408 {{0}, {24}, {0}, {0}}},
409
410 {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV,
411 {1, 1, 1}, 4, 4,
412 32, {{16}, {16}, {0}, {0}},
413 {{16}, {0}, {0}, {0}}},
414
415 {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG,
416 {1, 1, 1}, 4, 4,
417 32, {{0}, {16}, {16}, {0}},
418 {{0}, {0}, {16}, {0}}},
419
420 {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA,
421 {1, 1, 1}, 8, 8,
422 64, {{16}, {16}, {16}, {16}},
423 {{32}, {16}, {0}, {48}}},
424
425 {SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV,
426 {1, 1, 1}, 2, 2,
427 16, {{8}, {0}, {8}, {0}},
428 {{0}, {0}, {8}, {0}}},
429
430 {SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV,
431 {1, 1, 1}, 2, 2,
432 16, {{8}, {0}, {8}, {0}},
433 {{8}, {0}, {0}, {0}}},
434
435 {SVGA3D_NV12, SVGA3DBLOCKDESC_NV12,
436 {2, 2, 1}, 6, 2,
437 48, {{0}, {0}, {48}, {0}},
438 {{0}, {0}, {0}, {0}}},
439
440 {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
441 {1, 1, 1}, 4, 4,
442 32, {{8}, {8}, {8}, {8}},
443 {{0}, {8}, {16}, {24}}},
444
445 {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_RGBA,
446 {1, 1, 1}, 16, 16,
447 128, {{32}, {32}, {32}, {32}},
448 {{64}, {32}, {0}, {96}}},
449
450 {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA,
451 {1, 1, 1}, 16, 16,
452 128, {{32}, {32}, {32}, {32}},
453 {{64}, {32}, {0}, {96}}},
454
455 {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_UVWQ,
456 {1, 1, 1}, 16, 16,
457 128, {{32}, {32}, {32}, {32}},
458 {{64}, {32}, {0}, {96}}},
459
460 {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_RGB,
461 {1, 1, 1}, 12, 12,
462 96, {{32}, {32}, {32}, {0}},
463 {{64}, {32}, {0}, {0}}},
464
465 {SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
466 {1, 1, 1}, 12, 12,
467 96, {{32}, {32}, {32}, {0}},
468 {{64}, {32}, {0}, {0}}},
469
470 {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB,
471 {1, 1, 1}, 12, 12,
472 96, {{32}, {32}, {32}, {0}},
473 {{64}, {32}, {0}, {0}}},
474
475 {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_UVW,
476 {1, 1, 1}, 12, 12,
477 96, {{32}, {32}, {32}, {0}},
478 {{64}, {32}, {0}, {0}}},
479
480 {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_RGBA,
481 {1, 1, 1}, 8, 8,
482 64, {{16}, {16}, {16}, {16}},
483 {{32}, {16}, {0}, {48}}},
484
485 {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA,
486 {1, 1, 1}, 8, 8,
487 64, {{16}, {16}, {16}, {16}},
488 {{32}, {16}, {0}, {48}}},
489
490 {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_UVWQ,
491 {1, 1, 1}, 8, 8,
492 64, {{16}, {16}, {16}, {16}},
493 {{32}, {16}, {0}, {48}}},
494
495 {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_UVWQ,
496 {1, 1, 1}, 8, 8,
497 64, {{16}, {16}, {16}, {16}},
498 {{32}, {16}, {0}, {48}}},
499
500 {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_RG,
501 {1, 1, 1}, 8, 8,
502 64, {{0}, {32}, {32}, {0}},
503 {{0}, {32}, {0}, {0}}},
504
505 {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG,
506 {1, 1, 1}, 8, 8,
507 64, {{0}, {32}, {32}, {0}},
508 {{0}, {32}, {0}, {0}}},
509
510 {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_UV,
511 {1, 1, 1}, 8, 8,
512 64, {{0}, {32}, {32}, {0}},
513 {{0}, {32}, {0}, {0}}},
514
515 {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_RG,
516 {1, 1, 1}, 8, 8,
517 64, {{0}, {8}, {32}, {0}},
518 {{0}, {32}, {0}, {0}}},
519
520 {SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS,
521 {1, 1, 1}, 8, 8,
522 64, {{0}, {8}, {32}, {0}},
523 {{0}, {32}, {0}, {0}}},
524
525 {SVGA3D_R32_FLOAT_X8X24_TYPELESS, SVGA3DBLOCKDESC_R_FP,
526 {1, 1, 1}, 8, 8,
527 64, {{0}, {0}, {32}, {0}},
528 {{0}, {0}, {0}, {0}}},
529
530 {SVGA3D_X32_TYPELESS_G8X24_UINT, SVGA3DBLOCKDESC_GREEN,
531 {1, 1, 1}, 8, 8,
532 64, {{0}, {8}, {0}, {0}},
533 {{0}, {32}, {0}, {0}}},
534
535 {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_RGBA,
536 {1, 1, 1}, 4, 4,
537 32, {{10}, {10}, {10}, {2}},
538 {{0}, {10}, {20}, {30}}},
539
540 {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA,
541 {1, 1, 1}, 4, 4,
542 32, {{10}, {10}, {10}, {2}},
543 {{0}, {10}, {20}, {30}}},
544
545 {SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
546 {1, 1, 1}, 4, 4,
547 32, {{10}, {11}, {11}, {0}},
548 {{0}, {10}, {21}, {0}}},
549
550 {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
551 {1, 1, 1}, 4, 4,
552 32, {{8}, {8}, {8}, {8}},
553 {{16}, {8}, {0}, {24}}},
554
555 {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
556 {1, 1, 1}, 4, 4,
557 32, {{8}, {8}, {8}, {8}},
558 {{16}, {8}, {0}, {24}}},
559
560 {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
561 {1, 1, 1}, 4, 4,
562 32, {{8}, {8}, {8}, {8}},
563 {{16}, {8}, {0}, {24}}},
564
565 {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA,
566 {1, 1, 1}, 4, 4,
567 32, {{8}, {8}, {8}, {8}},
568 {{16}, {8}, {0}, {24}}},
569
570 {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA,
571 {1, 1, 1}, 4, 4,
572 32, {{8}, {8}, {8}, {8}},
573 {{16}, {8}, {0}, {24}}},
574
575 {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_RG,
576 {1, 1, 1}, 4, 4,
577 32, {{0}, {16}, {16}, {0}},
578 {{0}, {16}, {0}, {0}}},
579
580 {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_FP,
581 {1, 1, 1}, 4, 4,
582 32, {{0}, {16}, {16}, {0}},
583 {{0}, {16}, {0}, {0}}},
584
585 {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_UV,
586 {1, 1, 1}, 4, 4,
587 32, {{0}, {16}, {16}, {0}},
588 {{0}, {16}, {0}, {0}}},
589
590 {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_RED,
591 {1, 1, 1}, 4, 4,
592 32, {{0}, {0}, {32}, {0}},
593 {{0}, {0}, {0}, {0}}},
594
595 {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH,
596 {1, 1, 1}, 4, 4,
597 32, {{0}, {0}, {32}, {0}},
598 {{0}, {0}, {0}, {0}}},
599
600 {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_RED,
601 {1, 1, 1}, 4, 4,
602 32, {{0}, {0}, {32}, {0}},
603 {{0}, {0}, {0}, {0}}},
604
605 {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_RED,
606 {1, 1, 1}, 4, 4,
607 32, {{0}, {0}, {32}, {0}},
608 {{0}, {0}, {0}, {0}}},
609
610 {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_RG,
611 {1, 1, 1}, 4, 4,
612 32, {{0}, {8}, {24}, {0}},
613 {{0}, {24}, {0}, {0}}},
614
615 {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS,
616 {1, 1, 1}, 4, 4,
617 32, {{0}, {8}, {24}, {0}},
618 {{0}, {24}, {0}, {0}}},
619
620 {SVGA3D_R24_UNORM_X8_TYPELESS, SVGA3DBLOCKDESC_RED,
621 {1, 1, 1}, 4, 4,
622 32, {{0}, {0}, {24}, {0}},
623 {{0}, {0}, {0}, {0}}},
624
625 {SVGA3D_X24_TYPELESS_G8_UINT, SVGA3DBLOCKDESC_GREEN,
626 {1, 1, 1}, 4, 4,
627 32, {{0}, {8}, {0}, {0}},
628 {{0}, {24}, {0}, {0}}},
629
630 {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_RG,
631 {1, 1, 1}, 2, 2,
632 16, {{0}, {8}, {8}, {0}},
633 {{0}, {8}, {0}, {0}}},
634
635 {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG,
636 {1, 1, 1}, 2, 2,
637 16, {{0}, {8}, {8}, {0}},
638 {{0}, {8}, {0}, {0}}},
639
640 {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG,
641 {1, 1, 1}, 2, 2,
642 16, {{0}, {8}, {8}, {0}},
643 {{0}, {8}, {0}, {0}}},
644
645 {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_UV,
646 {1, 1, 1}, 2, 2,
647 16, {{0}, {8}, {8}, {0}},
648 {{0}, {8}, {0}, {0}}},
649
650 {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_RED,
651 {1, 1, 1}, 2, 2,
652 16, {{0}, {0}, {16}, {0}},
653 {{0}, {0}, {0}, {0}}},
654
655 {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_RED,
656 {1, 1, 1}, 2, 2,
657 16, {{0}, {0}, {16}, {0}},
658 {{0}, {0}, {0}, {0}}},
659
660 {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_RED,
661 {1, 1, 1}, 2, 2,
662 16, {{0}, {0}, {16}, {0}},
663 {{0}, {0}, {0}, {0}}},
664
665 {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_U,
666 {1, 1, 1}, 2, 2,
667 16, {{0}, {0}, {16}, {0}},
668 {{0}, {0}, {0}, {0}}},
669
670 {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_U,
671 {1, 1, 1}, 2, 2,
672 16, {{0}, {0}, {16}, {0}},
673 {{0}, {0}, {0}, {0}}},
674
675 {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_RED,
676 {1, 1, 1}, 1, 1,
677 8, {{0}, {0}, {8}, {0}},
678 {{0}, {0}, {0}, {0}}},
679
680 {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_RED,
681 {1, 1, 1}, 1, 1,
682 8, {{0}, {0}, {8}, {0}},
683 {{0}, {0}, {0}, {0}}},
684
685 {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_RED,
686 {1, 1, 1}, 1, 1,
687 8, {{0}, {0}, {8}, {0}},
688 {{0}, {0}, {0}, {0}}},
689
690 {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_U,
691 {1, 1, 1}, 1, 1,
692 8, {{0}, {0}, {8}, {0}},
693 {{0}, {0}, {0}, {0}}},
694
695 {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_U,
696 {1, 1, 1}, 1, 1,
697 8, {{0}, {0}, {8}, {0}},
698 {{0}, {0}, {0}, {0}}},
699
700 {SVGA3D_P8, SVGA3DBLOCKDESC_RED,
701 {1, 1, 1}, 1, 1,
702 8, {{0}, {0}, {8}, {0}},
703 {{0}, {0}, {0}, {0}}},
704
705 {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGBE,
706 {1, 1, 1}, 4, 4,
707 32, {{9}, {9}, {9}, {5}},
708 {{18}, {9}, {0}, {27}}},
709
710 {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_RG,
711 {1, 1, 1}, 2, 2,
712 16, {{0}, {8}, {8}, {0}},
713 {{0}, {8}, {0}, {0}}},
714
715 {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_RG,
716 {1, 1, 1}, 2, 2,
717 16, {{0}, {8}, {8}, {0}},
718 {{0}, {8}, {0}, {0}}},
719
720 {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
721 {4, 4, 1}, 8, 8,
722 64, {{0}, {0}, {64}, {0}},
723 {{0}, {0}, {0}, {0}}},
724
725 {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
726 {4, 4, 1}, 8, 8,
727 64, {{0}, {0}, {64}, {0}},
728 {{0}, {0}, {0}, {0}}},
729
730 {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
731 {4, 4, 1}, 16, 16,
732 128, {{0}, {0}, {128}, {0}},
733 {{0}, {0}, {0}, {0}}},
734
735 {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
736 {4, 4, 1}, 16, 16,
737 128, {{0}, {0}, {128}, {0}},
738 {{0}, {0}, {0}, {0}}},
739
740 {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
741 {4, 4, 1}, 16, 16,
742 128, {{0}, {0}, {128}, {0}},
743 {{0}, {0}, {0}, {0}}},
744
745 {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
746 {4, 4, 1}, 16, 16,
747 128, {{0}, {0}, {128}, {0}},
748 {{0}, {0}, {0}, {0}}},
749
750 {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
751 {4, 4, 1}, 8, 8,
752 64, {{0}, {0}, {64}, {0}},
753 {{0}, {0}, {0}, {0}}},
754
755 {SVGA3D_ATI1, SVGA3DBLOCKDESC_COMPRESSED,
756 {4, 4, 1}, 8, 8,
757 64, {{0}, {0}, {64}, {0}},
758 {{0}, {0}, {0}, {0}}},
759
760 {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
761 {4, 4, 1}, 8, 8,
762 64, {{0}, {0}, {64}, {0}},
763 {{0}, {0}, {0}, {0}}},
764
765 {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
766 {4, 4, 1}, 16, 16,
767 128, {{0}, {0}, {128}, {0}},
768 {{0}, {0}, {0}, {0}}},
769
770 {SVGA3D_ATI2, SVGA3DBLOCKDESC_COMPRESSED,
771 {4, 4, 1}, 16, 16,
772 128, {{0}, {0}, {128}, {0}},
773 {{0}, {0}, {0}, {0}}},
774
775 {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
776 {4, 4, 1}, 16, 16,
777 128, {{0}, {0}, {128}, {0}},
778 {{0}, {0}, {0}, {0}}},
779
780 {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA,
781 {1, 1, 1}, 4, 4,
782 32, {{10}, {10}, {10}, {2}},
783 {{0}, {10}, {20}, {30}}},
784
785 {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
786 {1, 1, 1}, 4, 4,
787 32, {{8}, {8}, {8}, {8}},
788 {{0}, {8}, {16}, {24}}},
789
790 {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
791 {1, 1, 1}, 4, 4,
792 32, {{8}, {8}, {8}, {8}},
793 {{0}, {8}, {16}, {24}}},
794
795 {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_RGB,
796 {1, 1, 1}, 4, 4,
797 24, {{8}, {8}, {8}, {0}},
798 {{0}, {8}, {16}, {24}}},
799
800 {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_SRGB,
801 {1, 1, 1}, 4, 4,
802 24, {{8}, {8}, {8}, {0}},
803 {{0}, {8}, {16}, {24}}},
804
805 {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH,
806 {1, 1, 1}, 2, 2,
807 16, {{0}, {0}, {16}, {0}},
808 {{0}, {0}, {0}, {0}}},
809
810 {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH,
811 {1, 1, 1}, 4, 4,
812 32, {{0}, {8}, {24}, {0}},
813 {{0}, {24}, {0}, {0}}},
814
815 {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS,
816 {1, 1, 1}, 4, 4,
817 32, {{0}, {8}, {24}, {0}},
818 {{0}, {24}, {0}, {0}}},
819
820 {SVGA3D_YV12, SVGA3DBLOCKDESC_YV12,
821 {2, 2, 1}, 6, 2,
822 48, {{0}, {0}, {48}, {0}},
823 {{0}, {0}, {0}, {0}}},
824
825 {SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
826 {1, 1, 1}, 16, 16,
827 128, {{32}, {32}, {32}, {32}},
828 {{64}, {32}, {0}, {96}}},
829
830 {SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
831 {1, 1, 1}, 8, 8,
832 64, {{16}, {16}, {16}, {16}},
833 {{32}, {16}, {0}, {48}}},
834
835 {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA,
836 {1, 1, 1}, 8, 8,
837 64, {{16}, {16}, {16}, {16}},
838 {{32}, {16}, {0}, {48}}},
839
840 {SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP,
841 {1, 1, 1}, 8, 8,
842 64, {{0}, {32}, {32}, {0}},
843 {{0}, {32}, {0}, {0}}},
844
845 {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA,
846 {1, 1, 1}, 4, 4,
847 32, {{10}, {10}, {10}, {2}},
848 {{0}, {10}, {20}, {30}}},
849
850 {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA,
851 {1, 1, 1}, 4, 4,
852 32, {{8}, {8}, {8}, {8}},
853 {{24}, {16}, {8}, {0}}},
854
855 {SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP,
856 {1, 1, 1}, 4, 4,
857 32, {{0}, {16}, {16}, {0}},
858 {{0}, {16}, {0}, {0}}},
859
860 {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG,
861 {1, 1, 1}, 4, 4,
862 32, {{0}, {16}, {16}, {0}},
863 {{0}, {0}, {16}, {0}}},
864
865 {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG,
866 {1, 1, 1}, 4, 4,
867 32, {{16}, {16}, {0}, {0}},
868 {{16}, {0}, {0}, {0}}},
869
870 {SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP,
871 {1, 1, 1}, 4, 4,
872 32, {{0}, {0}, {32}, {0}},
873 {{0}, {0}, {0}, {0}}},
874
875 {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG,
876 {1, 1, 1}, 2, 2,
877 16, {{8}, {8}, {0}, {0}},
878 {{8}, {0}, {0}, {0}}},
879
880 {SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP,
881 {1, 1, 1}, 2, 2,
882 16, {{0}, {0}, {16}, {0}},
883 {{0}, {0}, {0}, {0}}},
884
885 {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH,
886 {1, 1, 1}, 2, 2,
887 16, {{0}, {0}, {16}, {0}},
888 {{0}, {0}, {0}, {0}}},
889
890 {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_ALPHA,
891 {1, 1, 1}, 1, 1,
892 8, {{0}, {0}, {0}, {8}},
893 {{0}, {0}, {0}, {0}}},
894
895 {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
896 {4, 4, 1}, 8, 8,
897 64, {{0}, {0}, {64}, {0}},
898 {{0}, {0}, {0}, {0}}},
899
900 {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
901 {4, 4, 1}, 16, 16,
902 128, {{0}, {0}, {128}, {0}},
903 {{0}, {0}, {0}, {0}}},
904
905 {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
906 {4, 4, 1}, 16, 16,
907 128, {{0}, {0}, {128}, {0}},
908 {{0}, {0}, {0}, {0}}},
909
910 {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB,
911 {1, 1, 1}, 2, 2,
912 16, {{5}, {6}, {5}, {0}},
913 {{0}, {5}, {11}, {0}}},
914
915 {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA,
916 {1, 1, 1}, 2, 2,
917 16, {{5}, {5}, {5}, {1}},
918 {{0}, {5}, {10}, {15}}},
919
920 {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
921 {1, 1, 1}, 4, 4,
922 32, {{8}, {8}, {8}, {8}},
923 {{0}, {8}, {16}, {24}}},
924
925 {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB,
926 {1, 1, 1}, 4, 4,
927 24, {{8}, {8}, {8}, {0}},
928 {{0}, {8}, {16}, {24}}},
929
930 {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
931 {4, 4, 1}, 8, 8,
932 64, {{0}, {0}, {64}, {0}},
933 {{0}, {0}, {0}, {0}}},
934
935 {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
936 {4, 4, 1}, 16, 16,
937 128, {{0}, {0}, {128}, {0}},
938 {{0}, {0}, {0}, {0}}},
939
940};
941
942static inline u32 clamped_umul32(u32 a, u32 b)
943{
944 uint64_t tmp = (uint64_t) a * b; /* widen so the product cannot wrap */
945 return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp; /* clamp to U32_MAX */
946}
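For illustration only (not part of the patch): clamped_umul32() is a saturating multiply; the product is computed in 64 bits and clamped to U32_MAX instead of wrapping.

    u32 wrapped = (u32) 0x10000 * 0x10000;          /* wraps around to 0 */
    u32 clamped = clamped_umul32(0x10000, 0x10000); /* clamps to 0xffffffff */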
947
948static inline const struct svga3d_surface_desc *
949svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
950{
951 if (format < ARRAY_SIZE(svga3d_surface_descs))
952 return &svga3d_surface_descs[format];
953
954 return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
955}
956
957/*
958 *----------------------------------------------------------------------
959 *
960 * svga3dsurface_get_mip_size --
961 *
962 * Given a base level size and the mip level, compute the size of
963 * the mip level.
964 *
965 * Results:
966 * See above.
967 *
968 * Side effects:
969 * None.
970 *
971 *----------------------------------------------------------------------
972 */
973
974static inline surf_size_struct
975svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
976{
977 surf_size_struct size;
978
979 size.width = max_t(u32, base_level.width >> mip_level, 1);
980 size.height = max_t(u32, base_level.height >> mip_level, 1);
981 size.depth = max_t(u32, base_level.depth >> mip_level, 1);
982 return size;
983}
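For illustration only (not part of the patch): each mip level is the base level shifted right by the mip index, with every dimension clamped to at least 1.

    surf_size_struct base = { .width = 8, .height = 8, .depth = 1 };
    surf_size_struct mip2 = svga3dsurface_get_mip_size(base, 2);
    /* mip2 is 2x2x1; mip 3 and beyond clamp to 1x1x1 */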
984
985static inline void
986svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
987 const surf_size_struct *pixel_size,
988 surf_size_struct *block_size)
989{
990 block_size->width = DIV_ROUND_UP(pixel_size->width,
991 desc->block_size.width);
992 block_size->height = DIV_ROUND_UP(pixel_size->height,
993 desc->block_size.height);
994 block_size->depth = DIV_ROUND_UP(pixel_size->depth,
995 desc->block_size.depth);
996}
997
998static inline bool
999svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
1000{
1001 return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
1002}
1003
1004static inline u32
1005svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
1006 const surf_size_struct *size)
1007{
1008 u32 pitch;
1009 surf_size_struct blocks;
1010
1011 svga3dsurface_get_size_in_blocks(desc, size, &blocks);
1012
1013 pitch = blocks.width * desc->pitch_bytes_per_block;
1014
1015 return pitch;
1016}
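For illustration only (not part of the patch): for block-compressed formats the pitch counts rows of blocks, not rows of pixels. SVGA3D_DXT1 uses 4x4 blocks of 8 bytes (see the table above), so a 64-pixel-wide surface spans 16 blocks per row:

    const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(SVGA3D_DXT1);
    surf_size_struct size = { .width = 64, .height = 64, .depth = 1 };
    u32 pitch = svga3dsurface_calculate_pitch(desc, &size); /* 16 * 8 = 128 bytes */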
1017
1018/*
1019 *-----------------------------------------------------------------------------
1020 *
1021 * svga3dsurface_get_image_buffer_size --
1022 *
1023 * Return the number of bytes of buffer space required to store
1024 * one image of a surface, optionally using the specified pitch.
1025 *
1026 * If pitch is zero, it is assumed that rows are tightly packed.
1027 *
1028 * This function is overflow-safe. If the result would have
1029 * overflowed, MAX_UINT32 is returned instead.
1030 *
1031 * Results:
1032 * Byte count.
1033 *
1034 * Side effects:
1035 * None.
1036 *
1037 *-----------------------------------------------------------------------------
1038 */
1039
1040static inline u32
1041svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
1042 const surf_size_struct *size,
1043 u32 pitch)
1044{
1045 surf_size_struct image_blocks;
1046 u32 slice_size, total_size;
1047
1048 svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
1049
1050 if (svga3dsurface_is_planar_surface(desc)) {
1051 total_size = clamped_umul32(image_blocks.width,
1052 image_blocks.height);
1053 total_size = clamped_umul32(total_size, image_blocks.depth);
1054 total_size = clamped_umul32(total_size, desc->bytes_per_block);
1055 return total_size;
1056 }
1057
1058 if (pitch == 0)
1059 pitch = svga3dsurface_calculate_pitch(desc, size);
1060
1061 slice_size = clamped_umul32(image_blocks.height, pitch);
1062 total_size = clamped_umul32(slice_size, image_blocks.depth);
1063
1064 return total_size;
1065}
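For illustration only (not part of the patch): for the planar SVGA3D_NV12 format the pitch argument is ignored and the result is simply blocks * bytes_per_block. A 64x64 image is 32x32 blocks of 6 bytes:

    const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(SVGA3D_NV12);
    surf_size_struct size = { .width = 64, .height = 64, .depth = 1 };
    u32 bytes = svga3dsurface_get_image_buffer_size(desc, &size, 0);
    /* 32 * 32 * 6 = 6144 bytes, i.e. 1.5 bytes per pixel as expected for NV12 */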
1066
1067static inline u32
1068svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
1069 surf_size_struct base_level_size,
1070 u32 num_mip_levels,
1071 u32 num_layers)
1072{
1073 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
1074 u32 total_size = 0;
1075 u32 mip;
1076
1077 for (mip = 0; mip < num_mip_levels; mip++) {
1078 surf_size_struct size =
1079 svga3dsurface_get_mip_size(base_level_size, mip);
1080 total_size += svga3dsurface_get_image_buffer_size(desc,
1081 &size, 0);
1082 }
1083
1084 return total_size * num_layers;
1085}
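For illustration only (not part of the patch): the serialized size is the tightly packed mip chain times the layer count. A 4x4 SVGA3D_X8R8G8B8 texture with a full three-level mip chain serializes to 64 + 16 + 4 bytes:

    surf_size_struct base = { .width = 4, .height = 4, .depth = 1 };
    u32 bytes = svga3dsurface_get_serialized_size(SVGA3D_X8R8G8B8, base, 3, 1);
    /* (4*4 + 2*2 + 1*1) pixels * 4 bytes each = 84 bytes */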
1086
1087
1088/**
1089 * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
1090 * in an image (or volume).
1091 *
1092 * @width: The image width in pixels.
1093 * @height: The image height in pixels.
1094 */
1095static inline u32
1096svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
1097 u32 width, u32 height,
1098 u32 x, u32 y, u32 z)
1099{
1100 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
1101 const u32 bw = desc->block_size.width, bh = desc->block_size.height;
1102 const u32 bd = desc->block_size.depth;
1103 const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
1104 const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
1105 const u32 offset = (z / bd * imgstride +
1106 y / bh * rowstride +
1107 x / bw * desc->bytes_per_block);
1108 return offset;
1109}
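For illustration only (not part of the patch): for an uncompressed 8x8 SVGA3D_A8R8G8B8 image the row stride is 8 * 4 = 32 bytes, so pixel (x=3, y=2, z=0) lands at 2 * 32 + 3 * 4:

    u32 offset = svga3dsurface_get_pixel_offset(SVGA3D_A8R8G8B8, 8, 8, 3, 2, 0);
    /* offset == 76 */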
1110
1111
1112static inline u32
1113svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
1114 surf_size_struct baseLevelSize,
1115 u32 numMipLevels,
1116 u32 face,
1117 u32 mip)
1118
1119{
1120 u32 offset;
1121 u32 mipChainBytes;
1122 u32 mipChainBytesToLevel;
1123 u32 i;
1124 const struct svga3d_surface_desc *desc;
1125 surf_size_struct mipSize;
1126 u32 bytes;
1127
1128 desc = svga3dsurface_get_desc(format);
1129
1130 mipChainBytes = 0;
1131 mipChainBytesToLevel = 0;
1132 for (i = 0; i < numMipLevels; i++) {
1133 mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
1134 bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
1135 mipChainBytes += bytes;
1136 if (i < mip)
1137 mipChainBytesToLevel += bytes;
1138 }
1139
1140 offset = mipChainBytes * face + mipChainBytesToLevel;
1141
1142 return offset;
1143}
1144
1145
1146/**
1147 * svga3dsurface_is_gb_screen_target_format - Is the specified format usable as
1148 * a ScreenTarget?
1149 * (with just the GBObjects cap-bit
1150 * set)
1151 * @format: format to be queried
1152 *
1153 * RETURNS:
1154 * true if the queried format is valid for screen targets
1155 */
1156static inline bool
1157svga3dsurface_is_gb_screen_target_format(SVGA3dSurfaceFormat format)
1158{
1159 return (format == SVGA3D_X8R8G8B8 ||
1160 format == SVGA3D_A8R8G8B8 ||
1161 format == SVGA3D_R5G6B5 ||
1162 format == SVGA3D_X1R5G5B5 ||
1163 format == SVGA3D_A1R5G5B5 ||
1164 format == SVGA3D_P8);
1165}
1166
1167
1168/**
1169 * svga3dsurface_is_dx_screen_target_format - Is the specified format usable as
1170 * a ScreenTarget?
1171 * (with DX10 enabled)
1172 *
1173 * @format: format to be queried
1174 *
1175 * Results:
1176 * true if the queried format is valid for screen targets
1177 */
1178static inline bool
1179svga3dsurface_is_dx_screen_target_format(SVGA3dSurfaceFormat format)
1180{
1181 return (format == SVGA3D_R8G8B8A8_UNORM ||
1182 format == SVGA3D_B8G8R8A8_UNORM ||
1183 format == SVGA3D_B8G8R8X8_UNORM);
1184}
1185
1186
1187/**
1188 * svga3dsurface_is_screen_target_format - Is the specified format usable as a
1189 * ScreenTarget?
1190 * (for some combination of caps)
1191 *
1192 * @format: format to be queried
1193 *
1194 * Results:
1195 * true if the queried format is valid for screen targets
1196 */
1197static inline bool
1198svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
1199{
1200 if (svga3dsurface_is_gb_screen_target_format(format))
1201 return true;
1202
1203 return svga3dsurface_is_dx_screen_target_format(format);
1204}
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
new file mode 100644
index 000000000000..27b33ba88430
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -0,0 +1,1633 @@
1/**********************************************************
2 * Copyright 2012-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_types.h --
28 *
29 * SVGA 3d hardware definitions for basic types
30 */
31
32#ifndef _SVGA3D_TYPES_H_
33#define _SVGA3D_TYPES_H_
34
35#define INCLUDE_ALLOW_MODULE
36#define INCLUDE_ALLOW_USERLEVEL
37#define INCLUDE_ALLOW_VMCORE
38
39#include "includeCheck.h"
40
41/*
42 * Generic Types
43 */
44
45#define SVGA3D_INVALID_ID ((uint32)-1)
46
47typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
48typedef uint32 SVGA3dColor; /* a, r, g, b */
49
50typedef
51#include "vmware_pack_begin.h"
52struct SVGA3dCopyRect {
53 uint32 x;
54 uint32 y;
55 uint32 w;
56 uint32 h;
57 uint32 srcx;
58 uint32 srcy;
59}
60#include "vmware_pack_end.h"
61SVGA3dCopyRect;
62
63typedef
64#include "vmware_pack_begin.h"
65struct SVGA3dCopyBox {
66 uint32 x;
67 uint32 y;
68 uint32 z;
69 uint32 w;
70 uint32 h;
71 uint32 d;
72 uint32 srcx;
73 uint32 srcy;
74 uint32 srcz;
75}
76#include "vmware_pack_end.h"
77SVGA3dCopyBox;
78
79typedef
80#include "vmware_pack_begin.h"
81struct SVGA3dRect {
82 uint32 x;
83 uint32 y;
84 uint32 w;
85 uint32 h;
86}
87#include "vmware_pack_end.h"
88SVGA3dRect;
89
90typedef
91#include "vmware_pack_begin.h"
92struct {
93 uint32 x;
94 uint32 y;
95 uint32 z;
96 uint32 w;
97 uint32 h;
98 uint32 d;
99}
100#include "vmware_pack_end.h"
101SVGA3dBox;
102
103typedef
104#include "vmware_pack_begin.h"
105struct {
106 uint32 x;
107 uint32 y;
108 uint32 z;
109}
110#include "vmware_pack_end.h"
111SVGA3dPoint;
112
113/*
114 * Surface formats.
115 */
116typedef enum SVGA3dSurfaceFormat {
117 SVGA3D_FORMAT_INVALID = 0,
118
119 SVGA3D_X8R8G8B8 = 1,
120 SVGA3D_FORMAT_MIN = 1,
121
122 SVGA3D_A8R8G8B8 = 2,
123
124 SVGA3D_R5G6B5 = 3,
125 SVGA3D_X1R5G5B5 = 4,
126 SVGA3D_A1R5G5B5 = 5,
127 SVGA3D_A4R4G4B4 = 6,
128
129 SVGA3D_Z_D32 = 7,
130 SVGA3D_Z_D16 = 8,
131 SVGA3D_Z_D24S8 = 9,
132 SVGA3D_Z_D15S1 = 10,
133
134 SVGA3D_LUMINANCE8 = 11,
135 SVGA3D_LUMINANCE4_ALPHA4 = 12,
136 SVGA3D_LUMINANCE16 = 13,
137 SVGA3D_LUMINANCE8_ALPHA8 = 14,
138
139 SVGA3D_DXT1 = 15,
140 SVGA3D_DXT2 = 16,
141 SVGA3D_DXT3 = 17,
142 SVGA3D_DXT4 = 18,
143 SVGA3D_DXT5 = 19,
144
145 SVGA3D_BUMPU8V8 = 20,
146 SVGA3D_BUMPL6V5U5 = 21,
147 SVGA3D_BUMPX8L8V8U8 = 22,
148 SVGA3D_BUMPL8V8U8 = 23,
149
150 SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
151 SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
152
153 SVGA3D_A2R10G10B10 = 26,
154
155 /* signed formats */
156 SVGA3D_V8U8 = 27,
157 SVGA3D_Q8W8V8U8 = 28,
158 SVGA3D_CxV8U8 = 29,
159
160 /* mixed formats */
161 SVGA3D_X8L8V8U8 = 30,
162 SVGA3D_A2W10V10U10 = 31,
163
164 SVGA3D_ALPHA8 = 32,
165
166 /* Single- and dual-component floating point formats */
167 SVGA3D_R_S10E5 = 33,
168 SVGA3D_R_S23E8 = 34,
169 SVGA3D_RG_S10E5 = 35,
170 SVGA3D_RG_S23E8 = 36,
171
172 SVGA3D_BUFFER = 37,
173
174 SVGA3D_Z_D24X8 = 38,
175
176 SVGA3D_V16U16 = 39,
177
178 SVGA3D_G16R16 = 40,
179 SVGA3D_A16B16G16R16 = 41,
180
181 /* Packed Video formats */
182 SVGA3D_UYVY = 42,
183 SVGA3D_YUY2 = 43,
184
185 /* Planar video formats */
186 SVGA3D_NV12 = 44,
187
188 /* Video format with alpha */
189 SVGA3D_AYUV = 45,
190
191 SVGA3D_R32G32B32A32_TYPELESS = 46,
192 SVGA3D_R32G32B32A32_UINT = 47,
193 SVGA3D_R32G32B32A32_SINT = 48,
194 SVGA3D_R32G32B32_TYPELESS = 49,
195 SVGA3D_R32G32B32_FLOAT = 50,
196 SVGA3D_R32G32B32_UINT = 51,
197 SVGA3D_R32G32B32_SINT = 52,
198 SVGA3D_R16G16B16A16_TYPELESS = 53,
199 SVGA3D_R16G16B16A16_UINT = 54,
200 SVGA3D_R16G16B16A16_SNORM = 55,
201 SVGA3D_R16G16B16A16_SINT = 56,
202 SVGA3D_R32G32_TYPELESS = 57,
203 SVGA3D_R32G32_UINT = 58,
204 SVGA3D_R32G32_SINT = 59,
205 SVGA3D_R32G8X24_TYPELESS = 60,
206 SVGA3D_D32_FLOAT_S8X24_UINT = 61,
207 SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
208 SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
209 SVGA3D_R10G10B10A2_TYPELESS = 64,
210 SVGA3D_R10G10B10A2_UINT = 65,
211 SVGA3D_R11G11B10_FLOAT = 66,
212 SVGA3D_R8G8B8A8_TYPELESS = 67,
213 SVGA3D_R8G8B8A8_UNORM = 68,
214 SVGA3D_R8G8B8A8_UNORM_SRGB = 69,
215 SVGA3D_R8G8B8A8_UINT = 70,
216 SVGA3D_R8G8B8A8_SINT = 71,
217 SVGA3D_R16G16_TYPELESS = 72,
218 SVGA3D_R16G16_UINT = 73,
219 SVGA3D_R16G16_SINT = 74,
220 SVGA3D_R32_TYPELESS = 75,
221 SVGA3D_D32_FLOAT = 76,
222 SVGA3D_R32_UINT = 77,
223 SVGA3D_R32_SINT = 78,
224 SVGA3D_R24G8_TYPELESS = 79,
225 SVGA3D_D24_UNORM_S8_UINT = 80,
226 SVGA3D_R24_UNORM_X8_TYPELESS = 81,
227 SVGA3D_X24_TYPELESS_G8_UINT = 82,
228 SVGA3D_R8G8_TYPELESS = 83,
229 SVGA3D_R8G8_UNORM = 84,
230 SVGA3D_R8G8_UINT = 85,
231 SVGA3D_R8G8_SINT = 86,
232 SVGA3D_R16_TYPELESS = 87,
233 SVGA3D_R16_UNORM = 88,
234 SVGA3D_R16_UINT = 89,
235 SVGA3D_R16_SNORM = 90,
236 SVGA3D_R16_SINT = 91,
237 SVGA3D_R8_TYPELESS = 92,
238 SVGA3D_R8_UNORM = 93,
239 SVGA3D_R8_UINT = 94,
240 SVGA3D_R8_SNORM = 95,
241 SVGA3D_R8_SINT = 96,
242 SVGA3D_P8 = 97,
243 SVGA3D_R9G9B9E5_SHAREDEXP = 98,
244 SVGA3D_R8G8_B8G8_UNORM = 99,
245 SVGA3D_G8R8_G8B8_UNORM = 100,
246 SVGA3D_BC1_TYPELESS = 101,
247 SVGA3D_BC1_UNORM_SRGB = 102,
248 SVGA3D_BC2_TYPELESS = 103,
249 SVGA3D_BC2_UNORM_SRGB = 104,
250 SVGA3D_BC3_TYPELESS = 105,
251 SVGA3D_BC3_UNORM_SRGB = 106,
252 SVGA3D_BC4_TYPELESS = 107,
253 SVGA3D_ATI1 = 108, /* DX9-specific BC4_UNORM */
254 SVGA3D_BC4_SNORM = 109,
255 SVGA3D_BC5_TYPELESS = 110,
256 SVGA3D_ATI2 = 111, /* DX9-specific BC5_UNORM */
257 SVGA3D_BC5_SNORM = 112,
258 SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113,
259 SVGA3D_B8G8R8A8_TYPELESS = 114,
260 SVGA3D_B8G8R8A8_UNORM_SRGB = 115,
261 SVGA3D_B8G8R8X8_TYPELESS = 116,
262 SVGA3D_B8G8R8X8_UNORM_SRGB = 117,
263
264 /* Advanced depth formats. */
265 SVGA3D_Z_DF16 = 118,
266 SVGA3D_Z_DF24 = 119,
267 SVGA3D_Z_D24S8_INT = 120,
268
269 /* Planar video formats. */
270 SVGA3D_YV12 = 121,
271
272 SVGA3D_R32G32B32A32_FLOAT = 122,
273 SVGA3D_R16G16B16A16_FLOAT = 123,
274 SVGA3D_R16G16B16A16_UNORM = 124,
275 SVGA3D_R32G32_FLOAT = 125,
276 SVGA3D_R10G10B10A2_UNORM = 126,
277 SVGA3D_R8G8B8A8_SNORM = 127,
278 SVGA3D_R16G16_FLOAT = 128,
279 SVGA3D_R16G16_UNORM = 129,
280 SVGA3D_R16G16_SNORM = 130,
281 SVGA3D_R32_FLOAT = 131,
282 SVGA3D_R8G8_SNORM = 132,
283 SVGA3D_R16_FLOAT = 133,
284 SVGA3D_D16_UNORM = 134,
285 SVGA3D_A8_UNORM = 135,
286 SVGA3D_BC1_UNORM = 136,
287 SVGA3D_BC2_UNORM = 137,
288 SVGA3D_BC3_UNORM = 138,
289 SVGA3D_B5G6R5_UNORM = 139,
290 SVGA3D_B5G5R5A1_UNORM = 140,
291 SVGA3D_B8G8R8A8_UNORM = 141,
292 SVGA3D_B8G8R8X8_UNORM = 142,
293 SVGA3D_BC4_UNORM = 143,
294 SVGA3D_BC5_UNORM = 144,
295
296 SVGA3D_FORMAT_MAX
297} SVGA3dSurfaceFormat;
298
299typedef enum SVGA3dSurfaceFlags {
300 SVGA3D_SURFACE_CUBEMAP = (1 << 0),
301
302 /*
303 * HINT flags are not enforced by the device but are useful for
304 * performance.
305 */
306 SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
307 SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
308 SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
309 SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
310 SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
311 SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
312 SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
313 SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
314 SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
315 SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
316 SVGA3D_SURFACE_DECODE_RENDERTARGET = (1 << 11),
317
318 /*
319 * Is this surface using a base-level pitch for its mob backing?
320 *
321 * This flag is not intended to be set by guest-drivers, but is instead
322 * set by the device when the surface is bound to a mob with a specified
323 * pitch.
324 */
325 SVGA3D_SURFACE_MOB_PITCH = (1 << 12),
326
327 SVGA3D_SURFACE_INACTIVE = (1 << 13),
328 SVGA3D_SURFACE_HINT_RT_LOCKABLE = (1 << 14),
329 SVGA3D_SURFACE_VOLUME = (1 << 15),
330
331 /*
332 * Required to be set on a surface to bind it to a screen target.
333 */
334 SVGA3D_SURFACE_SCREENTARGET = (1 << 16),
335
336 /*
337 * Align images in the guest-backing mob to 16 bytes.
338 */
339 SVGA3D_SURFACE_ALIGN16 = (1 << 17),
340
341 SVGA3D_SURFACE_1D = (1 << 18),
342 SVGA3D_SURFACE_ARRAY = (1 << 19),
343
344 /*
345 * Bind flags.
346 * These are enforced for any surface defined with DefineGBSurface_v2.
347 */
348 SVGA3D_SURFACE_BIND_VERTEX_BUFFER = (1 << 20),
349 SVGA3D_SURFACE_BIND_INDEX_BUFFER = (1 << 21),
350 SVGA3D_SURFACE_BIND_CONSTANT_BUFFER = (1 << 22),
351 SVGA3D_SURFACE_BIND_SHADER_RESOURCE = (1 << 23),
352 SVGA3D_SURFACE_BIND_RENDER_TARGET = (1 << 24),
353 SVGA3D_SURFACE_BIND_DEPTH_STENCIL = (1 << 25),
354 SVGA3D_SURFACE_BIND_STREAM_OUTPUT = (1 << 26),
355
356 /*
357 * A note on staging flags:
358 *
359 * The STAGING flags note that the surface will not be used directly by the
360 * drawing pipeline, i.e. that it will not be bound to any bind point.
361 * Staging surfaces may be used by copy operations to move data in and out
362 * of other surfaces.
363 *
364 * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
365 * updates indirectly, i.e. the surface will not be updated directly, but
366 * will receive copies from staging surfaces.
367 */
368 SVGA3D_SURFACE_STAGING_UPLOAD = (1 << 27),
369 SVGA3D_SURFACE_STAGING_DOWNLOAD = (1 << 28),
370 SVGA3D_SURFACE_HINT_INDIRECT_UPDATE = (1 << 29),
371
372 /*
373 * Setting this flag allows this surface to be used with the
374 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command. It is only valid for
375 * buffer surfaces, and no bind flags are allowed to be set on surfaces
376 * with this flag.
377 */
378 SVGA3D_SURFACE_TRANSFER_FROM_BUFFER = (1 << 30),
379
380 /*
381 * Marker for the last defined bit.
382 */
383 SVGA3D_SURFACE_FLAG_MAX = (1 << 31),
384} SVGA3dSurfaceFlags;
385
386#define SVGA3D_SURFACE_HB_DISALLOWED_MASK \
387 ( SVGA3D_SURFACE_MOB_PITCH | \
388 SVGA3D_SURFACE_SCREENTARGET | \
389 SVGA3D_SURFACE_ALIGN16 | \
390 SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
391 SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
392 SVGA3D_SURFACE_STAGING_UPLOAD | \
393 SVGA3D_SURFACE_STAGING_DOWNLOAD | \
394 SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
395 SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
396 )
397
398#define SVGA3D_SURFACE_2D_DISALLOWED_MASK \
399 ( SVGA3D_SURFACE_CUBEMAP | \
400 SVGA3D_SURFACE_MASKABLE_ANTIALIAS | \
401 SVGA3D_SURFACE_AUTOGENMIPMAPS | \
402 SVGA3D_SURFACE_DECODE_RENDERTARGET | \
403 SVGA3D_SURFACE_VOLUME | \
404 SVGA3D_SURFACE_1D | \
405 SVGA3D_SURFACE_ARRAY | \
406 SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
407 SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
408 SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
409 SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
410 SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
411 SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
412 )
413
414#define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \
415 ( SVGA3D_SURFACE_CUBEMAP | \
416 SVGA3D_SURFACE_AUTOGENMIPMAPS | \
417 SVGA3D_SURFACE_DECODE_RENDERTARGET | \
418 SVGA3D_SURFACE_VOLUME | \
419 SVGA3D_SURFACE_1D | \
420 SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
421 SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
422 SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
423 SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
424 SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
425 SVGA3D_SURFACE_INACTIVE | \
426 SVGA3D_SURFACE_STAGING_UPLOAD | \
427 SVGA3D_SURFACE_STAGING_DOWNLOAD | \
428 SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
429 SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
430 )
431
432#define SVGA3D_SURFACE_DX_ONLY_MASK \
433 ( SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
434 SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
435 )
436#define SVGA3D_SURFACE_STAGING_MASK \
437 ( SVGA3D_SURFACE_STAGING_UPLOAD | \
438 SVGA3D_SURFACE_STAGING_DOWNLOAD \
439 )
440
441#define SVGA3D_SURFACE_BIND_MASK \
442 ( SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
443 SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
444 SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
445 SVGA3D_SURFACE_BIND_SHADER_RESOURCE | \
446 SVGA3D_SURFACE_BIND_RENDER_TARGET | \
447 SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
448 SVGA3D_SURFACE_BIND_STREAM_OUTPUT \
449 )
450
451typedef enum {
452 SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
453 SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
454 SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
455 SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
456 SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
457 SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
458 SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
459
460/*
461 * This format can be used as a render target if the current display mode
462 * has the same depth when the alpha channel is ignored, e.g. if the device
463 * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
464 * format op list entry for A8R8G8B8 should have this cap.
465 */
466 SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
467
468/*
469 * This format contains DirectDraw support (including Flip). This flag
470 * should not be set on alpha formats.
471 */
472 SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
473
474/*
475 * The rasterizer can support some level of Direct3D in this format, which
476 * implies that the driver can create a Context in this mode (for some
477 * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
478 * flag must also be set.
479 */
480 SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
481
482/*
483 * This is set for a private format when the driver has put the bpp in
484 * the structure.
485 */
486 SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
487
488/*
489 * Indicates that this format can be converted to any RGB format for which
490 * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
491 */
492 SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
493
494/*
495 * Indicates that this format can be used to create offscreen plain surfaces.
496 */
497 SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
498
499/*
500 * Indicates that this format can be read as an SRGB texture (meaning that the
501 * sampler will linearize the looked-up data)
502 */
503 SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
504
505/*
506 * Indicates that this format can be used in the bumpmap instructions
507 */
508 SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
509
510/*
511 * Indicates that this format can be sampled by the displacement map sampler
512 */
513 SVGA3DFORMAT_OP_DMAP = 0x00020000,
514
515/*
516 * Indicates that this format cannot be used with texture filtering
517 */
518 SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
519
520/*
521 * Indicates that format conversions are supported to this RGB format if
522 * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
523 */
524 SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
525
526/*
527 * Indicates that this format can be written as an SRGB target
528 * (meaning that the pixel pipe will de-linearize data on output to the format)
529 */
530 SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
531
532/*
533 * Indicates that this format cannot be used with alpha blending
534 */
535 SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
536
537/*
538 * Indicates that the device can auto-generate sublevels for resources
539 * of this format
540 */
541 SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
542
543/*
544 * Indicates that this format can be used by vertex texture sampler
545 */
546 SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
547
548/*
549 * Indicates that this format supports neither texture coordinate
550 * wrap modes nor mipmapping.
551 */
552 SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
553} SVGA3dFormatOp;
554
555#define SVGA3D_FORMAT_POSITIVE \
556 (SVGA3DFORMAT_OP_TEXTURE | \
557 SVGA3DFORMAT_OP_VOLUMETEXTURE | \
558 SVGA3DFORMAT_OP_CUBETEXTURE | \
559 SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET | \
560 SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET | \
561 SVGA3DFORMAT_OP_ZSTENCIL | \
562 SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH | \
563 SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET | \
564 SVGA3DFORMAT_OP_DISPLAYMODE | \
565 SVGA3DFORMAT_OP_3DACCELERATION | \
566 SVGA3DFORMAT_OP_PIXELSIZE | \
567 SVGA3DFORMAT_OP_CONVERT_TO_ARGB | \
568 SVGA3DFORMAT_OP_OFFSCREENPLAIN | \
569 SVGA3DFORMAT_OP_SRGBREAD | \
570 SVGA3DFORMAT_OP_BUMPMAP | \
571 SVGA3DFORMAT_OP_DMAP | \
572 SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB | \
573 SVGA3DFORMAT_OP_SRGBWRITE | \
574 SVGA3DFORMAT_OP_AUTOGENMIPMAP | \
575 SVGA3DFORMAT_OP_VERTEXTEXTURE)
576
577#define SVGA3D_FORMAT_NEGATIVE \
578 (SVGA3DFORMAT_OP_NOFILTER | \
579 SVGA3DFORMAT_OP_NOALPHABLEND | \
580 SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP)
581
582/*
583 * This structure is a conversion of SVGA3DFORMAT_OP_*
584 * Entries must be located at the same position.
585 */
586typedef union {
587 uint32 value;
588 struct {
589 uint32 texture : 1;
590 uint32 volumeTexture : 1;
591 uint32 cubeTexture : 1;
592 uint32 offscreenRenderTarget : 1;
593 uint32 sameFormatRenderTarget : 1;
594 uint32 unknown1 : 1;
595 uint32 zStencil : 1;
596 uint32 zStencilArbitraryDepth : 1;
597 uint32 sameFormatUpToAlpha : 1;
598 uint32 unknown2 : 1;
599 uint32 displayMode : 1;
600 uint32 acceleration3d : 1;
601 uint32 pixelSize : 1;
602 uint32 convertToARGB : 1;
603 uint32 offscreenPlain : 1;
604 uint32 sRGBRead : 1;
605 uint32 bumpMap : 1;
606 uint32 dmap : 1;
607 uint32 noFilter : 1;
608 uint32 memberOfGroupARGB : 1;
609 uint32 sRGBWrite : 1;
610 uint32 noAlphaBlend : 1;
611 uint32 autoGenMipMap : 1;
612 uint32 vertexTexture : 1;
613 uint32 noTexCoordWrapNorMip : 1;
614 };
615} SVGA3dSurfaceFormatCaps;
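For illustration only (not part of the patch), and assuming the compiler lays out the bitfield LSB-first as the device expects: each member lines up with the matching SVGA3DFORMAT_OP_* bit, with unknown1/unknown2 padding the gaps in the enum's values.

    SVGA3dSurfaceFormatCaps caps;
    caps.value = SVGA3DFORMAT_OP_TEXTURE | SVGA3DFORMAT_OP_CUBETEXTURE;
    /* caps.texture == 1 and caps.cubeTexture == 1 (bits 0 and 2) */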
616
617/*
618 * SVGA_3D_CMD_SETRENDERSTATE Types. All value types
619 * must fit in a uint32.
620 */
621
622typedef enum {
623 SVGA3D_RS_INVALID = 0,
624 SVGA3D_RS_MIN = 1,
625 SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
626 SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
627 SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
628 SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
629 SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
630 SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
631 SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
632 SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
633 SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
634 SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
635 SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
636 SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
637 SVGA3D_RS_STENCILREF = 13, /* uint32 */
638 SVGA3D_RS_STENCILMASK = 14, /* uint32 */
639 SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
640 SVGA3D_RS_FOGSTART = 16, /* float */
641 SVGA3D_RS_FOGEND = 17, /* float */
642 SVGA3D_RS_FOGDENSITY = 18, /* float */
643 SVGA3D_RS_POINTSIZE = 19, /* float */
644 SVGA3D_RS_POINTSIZEMIN = 20, /* float */
645 SVGA3D_RS_POINTSIZEMAX = 21, /* float */
646 SVGA3D_RS_POINTSCALE_A = 22, /* float */
647 SVGA3D_RS_POINTSCALE_B = 23, /* float */
648 SVGA3D_RS_POINTSCALE_C = 24, /* float */
649 SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
650 SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
651 SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
652 SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
653 SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
654 SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
655 SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
656 SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
657 SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
658 SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
659 SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
660 SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
661 SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
662 SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
663 SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
664 SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
665 SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
666 SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
667 SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
668 SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
669 SVGA3D_RS_ZBIAS = 45, /* float */
670 SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
671 SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
672 SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
673 SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
674 SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
675 SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
676 SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
677 SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
678 SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
679 SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
680 SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
681 SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
682 SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
683 SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
684 SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
685 SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
686 SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
687 SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
688 SVGA3D_RS_DEPTHBIAS = 64, /* float */
689
690
691 /*
692 * Output Gamma Level
693 *
694 * Output gamma affects the gamma curve of colors that are output from the
695 * rendering pipeline. A value of 1.0 specifies a linear color space. If the
696 * value is <= 0.0, gamma correction is ignored and linear color space is
697 * used.
698 */
699
700 SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
701 SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
702 SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
703 SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
704 SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
705 SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
706 SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
707 SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
708 SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
709 SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
710 SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
711 SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
712 SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
713 SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
714 SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
715 SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
716 SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
717 SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
718 SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
719 SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
720 SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
721 SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
722 SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
723 SVGA3D_RS_TWEENFACTOR = 88, /* float */
724 SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
725 SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
726 SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
727 SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
728 SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
729 SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
730 SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
731 SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
732 SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */
733 SVGA3D_RS_LINEWIDTH = 98, /* float */
734 SVGA3D_RS_MAX
735} SVGA3dRenderStateName;
736
737typedef enum {
738 SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0,
739 SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1,
740 SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2,
741 SVGA3D_TRANSPARENCYANTIALIAS_MAX
742} SVGA3dTransparencyAntialiasType;
743
744typedef enum {
745 SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
746 SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
747 SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
748 SVGA3D_VERTEXMATERIAL_MAX = 3,
749} SVGA3dVertexMaterial;
750
751typedef enum {
752 SVGA3D_FILLMODE_INVALID = 0,
753 SVGA3D_FILLMODE_MIN = 1,
754 SVGA3D_FILLMODE_POINT = 1,
755 SVGA3D_FILLMODE_LINE = 2,
756 SVGA3D_FILLMODE_FILL = 3,
757 SVGA3D_FILLMODE_MAX
758} SVGA3dFillModeType;
759
760
761typedef
762#include "vmware_pack_begin.h"
763union {
764 struct {
765 uint16 mode; /* SVGA3dFillModeType */
766 uint16 face; /* SVGA3dFace */
767 };
768 uint32 uintValue;
769}
770#include "vmware_pack_end.h"
771SVGA3dFillMode;
772
773typedef enum {
774 SVGA3D_SHADEMODE_INVALID = 0,
775 SVGA3D_SHADEMODE_FLAT = 1,
776 SVGA3D_SHADEMODE_SMOOTH = 2,
777 SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
778 SVGA3D_SHADEMODE_MAX
779} SVGA3dShadeMode;
780
781typedef
782#include "vmware_pack_begin.h"
783union {
784 struct {
785 uint16 repeat;
786 uint16 pattern;
787 };
788 uint32 uintValue;
789}
790#include "vmware_pack_end.h"
791SVGA3dLinePattern;
792
793typedef enum {
794 SVGA3D_BLENDOP_INVALID = 0,
795 SVGA3D_BLENDOP_MIN = 1,
796 SVGA3D_BLENDOP_ZERO = 1,
797 SVGA3D_BLENDOP_ONE = 2,
798 SVGA3D_BLENDOP_SRCCOLOR = 3,
799 SVGA3D_BLENDOP_INVSRCCOLOR = 4,
800 SVGA3D_BLENDOP_SRCALPHA = 5,
801 SVGA3D_BLENDOP_INVSRCALPHA = 6,
802 SVGA3D_BLENDOP_DESTALPHA = 7,
803 SVGA3D_BLENDOP_INVDESTALPHA = 8,
804 SVGA3D_BLENDOP_DESTCOLOR = 9,
805 SVGA3D_BLENDOP_INVDESTCOLOR = 10,
806 SVGA3D_BLENDOP_SRCALPHASAT = 11,
807 SVGA3D_BLENDOP_BLENDFACTOR = 12,
808 SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
809 SVGA3D_BLENDOP_SRC1COLOR = 14,
810 SVGA3D_BLENDOP_INVSRC1COLOR = 15,
811 SVGA3D_BLENDOP_SRC1ALPHA = 16,
812 SVGA3D_BLENDOP_INVSRC1ALPHA = 17,
813 SVGA3D_BLENDOP_BLENDFACTORALPHA = 18,
814 SVGA3D_BLENDOP_INVBLENDFACTORALPHA = 19,
815 SVGA3D_BLENDOP_MAX
816} SVGA3dBlendOp;
817
818typedef enum {
819 SVGA3D_BLENDEQ_INVALID = 0,
820 SVGA3D_BLENDEQ_MIN = 1,
821 SVGA3D_BLENDEQ_ADD = 1,
822 SVGA3D_BLENDEQ_SUBTRACT = 2,
823 SVGA3D_BLENDEQ_REVSUBTRACT = 3,
824 SVGA3D_BLENDEQ_MINIMUM = 4,
825 SVGA3D_BLENDEQ_MAXIMUM = 5,
826 SVGA3D_BLENDEQ_MAX
827} SVGA3dBlendEquation;
828
829typedef enum {
830 SVGA3D_DX11_LOGICOP_MIN = 0,
831 SVGA3D_DX11_LOGICOP_CLEAR = 0,
832 SVGA3D_DX11_LOGICOP_SET = 1,
833 SVGA3D_DX11_LOGICOP_COPY = 2,
834 SVGA3D_DX11_LOGICOP_COPY_INVERTED = 3,
835 SVGA3D_DX11_LOGICOP_NOOP = 4,
836 SVGA3D_DX11_LOGICOP_INVERT = 5,
837 SVGA3D_DX11_LOGICOP_AND = 6,
838 SVGA3D_DX11_LOGICOP_NAND = 7,
839 SVGA3D_DX11_LOGICOP_OR = 8,
840 SVGA3D_DX11_LOGICOP_NOR = 9,
841 SVGA3D_DX11_LOGICOP_XOR = 10,
842 SVGA3D_DX11_LOGICOP_EQUIV = 11,
843 SVGA3D_DX11_LOGICOP_AND_REVERSE = 12,
844 SVGA3D_DX11_LOGICOP_AND_INVERTED = 13,
845 SVGA3D_DX11_LOGICOP_OR_REVERSE = 14,
846 SVGA3D_DX11_LOGICOP_OR_INVERTED = 15,
847 SVGA3D_DX11_LOGICOP_MAX
848} SVGA3dDX11LogicOp;
849
850typedef enum {
851 SVGA3D_FRONTWINDING_INVALID = 0,
852 SVGA3D_FRONTWINDING_CW = 1,
853 SVGA3D_FRONTWINDING_CCW = 2,
854 SVGA3D_FRONTWINDING_MAX
855} SVGA3dFrontWinding;
856
857typedef enum {
858 SVGA3D_FACE_INVALID = 0,
859 SVGA3D_FACE_NONE = 1,
860 SVGA3D_FACE_MIN = 1,
861 SVGA3D_FACE_FRONT = 2,
862 SVGA3D_FACE_BACK = 3,
863 SVGA3D_FACE_FRONT_BACK = 4,
864 SVGA3D_FACE_MAX
865} SVGA3dFace;
866
867/*
868 * The order and the values should not be changed
869 */
870
871typedef enum {
872 SVGA3D_CMP_INVALID = 0,
873 SVGA3D_CMP_NEVER = 1,
874 SVGA3D_CMP_LESS = 2,
875 SVGA3D_CMP_EQUAL = 3,
876 SVGA3D_CMP_LESSEQUAL = 4,
877 SVGA3D_CMP_GREATER = 5,
878 SVGA3D_CMP_NOTEQUAL = 6,
879 SVGA3D_CMP_GREATEREQUAL = 7,
880 SVGA3D_CMP_ALWAYS = 8,
881 SVGA3D_CMP_MAX
882} SVGA3dCmpFunc;
883
884/*
885 * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
886 * the fog factor to be specified in the alpha component of the specular
887 * (a.k.a. secondary) vertex color.
888 */
889typedef enum {
890 SVGA3D_FOGFUNC_INVALID = 0,
891 SVGA3D_FOGFUNC_EXP = 1,
892 SVGA3D_FOGFUNC_EXP2 = 2,
893 SVGA3D_FOGFUNC_LINEAR = 3,
894 SVGA3D_FOGFUNC_PER_VERTEX = 4
895} SVGA3dFogFunction;
896
897/*
898 * SVGA3D_FOGTYPE_* specifies whether fog factors are computed on a per-vertex
899 * or per-pixel basis.
900 */
901typedef enum {
902 SVGA3D_FOGTYPE_INVALID = 0,
903 SVGA3D_FOGTYPE_VERTEX = 1,
904 SVGA3D_FOGTYPE_PIXEL = 2,
905 SVGA3D_FOGTYPE_MAX = 3
906} SVGA3dFogType;
907
908/*
909 * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
910 * computed using the eye Z value of each pixel (or vertex), whereas range-
911 * based fog is computed using the actual distance (range) to the eye.
912 */
913typedef enum {
914 SVGA3D_FOGBASE_INVALID = 0,
915 SVGA3D_FOGBASE_DEPTHBASED = 1,
916 SVGA3D_FOGBASE_RANGEBASED = 2,
917 SVGA3D_FOGBASE_MAX = 3
918} SVGA3dFogBase;
919
920typedef enum {
921 SVGA3D_STENCILOP_INVALID = 0,
922 SVGA3D_STENCILOP_MIN = 1,
923 SVGA3D_STENCILOP_KEEP = 1,
924 SVGA3D_STENCILOP_ZERO = 2,
925 SVGA3D_STENCILOP_REPLACE = 3,
926 SVGA3D_STENCILOP_INCRSAT = 4,
927 SVGA3D_STENCILOP_DECRSAT = 5,
928 SVGA3D_STENCILOP_INVERT = 6,
929 SVGA3D_STENCILOP_INCR = 7,
930 SVGA3D_STENCILOP_DECR = 8,
931 SVGA3D_STENCILOP_MAX
932} SVGA3dStencilOp;
933
934typedef enum {
935 SVGA3D_CLIPPLANE_0 = (1 << 0),
936 SVGA3D_CLIPPLANE_1 = (1 << 1),
937 SVGA3D_CLIPPLANE_2 = (1 << 2),
938 SVGA3D_CLIPPLANE_3 = (1 << 3),
939 SVGA3D_CLIPPLANE_4 = (1 << 4),
940 SVGA3D_CLIPPLANE_5 = (1 << 5),
941} SVGA3dClipPlanes;
942
943typedef enum {
944 SVGA3D_CLEAR_COLOR = 0x1,
945 SVGA3D_CLEAR_DEPTH = 0x2,
946 SVGA3D_CLEAR_STENCIL = 0x4,
947
948 /*
949 * Hint only, must be used together with SVGA3D_CLEAR_COLOR. If
950 * the SVGA3D_CLEAR_DEPTH or SVGA3D_CLEAR_STENCIL bit is set, this
951 * bit will be ignored.
952 */
953 SVGA3D_CLEAR_COLORFILL = 0x8
954} SVGA3dClearFlag;
955
956typedef enum {
957 SVGA3D_RT_DEPTH = 0,
958 SVGA3D_RT_MIN = 0,
959 SVGA3D_RT_STENCIL = 1,
960 SVGA3D_RT_COLOR0 = 2,
961 SVGA3D_RT_COLOR1 = 3,
962 SVGA3D_RT_COLOR2 = 4,
963 SVGA3D_RT_COLOR3 = 5,
964 SVGA3D_RT_COLOR4 = 6,
965 SVGA3D_RT_COLOR5 = 7,
966 SVGA3D_RT_COLOR6 = 8,
967 SVGA3D_RT_COLOR7 = 9,
968 SVGA3D_RT_MAX,
969 SVGA3D_RT_INVALID = ((uint32)-1),
970} SVGA3dRenderTargetType;
971
972#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
973
974typedef
975#include "vmware_pack_begin.h"
976union {
977 struct {
978 uint32 red : 1;
979 uint32 green : 1;
980 uint32 blue : 1;
981 uint32 alpha : 1;
982 };
983 uint32 uintValue;
984}
985#include "vmware_pack_end.h"
986SVGA3dColorMask;
987
988typedef enum {
989 SVGA3D_VBLEND_DISABLE = 0,
990 SVGA3D_VBLEND_1WEIGHT = 1,
991 SVGA3D_VBLEND_2WEIGHT = 2,
992 SVGA3D_VBLEND_3WEIGHT = 3,
993 SVGA3D_VBLEND_MAX = 4,
994} SVGA3dVertexBlendFlags;
995
996typedef enum {
997 SVGA3D_WRAPCOORD_0 = 1 << 0,
998 SVGA3D_WRAPCOORD_1 = 1 << 1,
999 SVGA3D_WRAPCOORD_2 = 1 << 2,
1000 SVGA3D_WRAPCOORD_3 = 1 << 3,
1001 SVGA3D_WRAPCOORD_ALL = 0xF,
1002} SVGA3dWrapFlags;
1003
1004/*
1005 * SVGA_3D_CMD_TEXTURESTATE Types. All value types
1006 * must fit in a uint32.
1007 */
1008
1009typedef enum {
1010 SVGA3D_TS_INVALID = 0,
1011 SVGA3D_TS_MIN = 1,
1012 SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
1013 SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
1014 SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
1015 SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
1016 SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
1017 SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
1018 SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
1019 SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
1020 SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
1021 SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
1022 SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
1023 SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
1024 SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
1025 SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
1026 SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
1027 SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
1028 SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
1029 SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
1030 SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
1031 SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
1032 SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
1033 SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
1034 SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
1035 SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
1036
1037
1038 /*
1039 * Sampler Gamma Level
1040 *
 1041 * Sampler gamma affects the color of samples taken from the sampler. A
 1042 * value of 1.0 will produce linear samples. If the value is <= 0.0, the
 1043 * gamma value is ignored and a linear space is used.
1044 */
1045
1046 SVGA3D_TS_GAMMA = 25, /* float */
1047 SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
1048 SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
1049 SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
1050 SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
1051 SVGA3D_TS_PREGB_MAX = 30, /* Max value before GBObjects */
1052 SVGA3D_TS_CONSTANT = 30, /* SVGA3dColor */
1053 SVGA3D_TS_COLOR_KEY_ENABLE = 31, /* SVGA3dBool */
1054 SVGA3D_TS_COLOR_KEY = 32, /* SVGA3dColor */
1055 SVGA3D_TS_MAX
1056} SVGA3dTextureStateName;
1057
1058typedef enum {
1059 SVGA3D_TC_INVALID = 0,
1060 SVGA3D_TC_DISABLE = 1,
1061 SVGA3D_TC_SELECTARG1 = 2,
1062 SVGA3D_TC_SELECTARG2 = 3,
1063 SVGA3D_TC_MODULATE = 4,
1064 SVGA3D_TC_ADD = 5,
1065 SVGA3D_TC_ADDSIGNED = 6,
1066 SVGA3D_TC_SUBTRACT = 7,
1067 SVGA3D_TC_BLENDTEXTUREALPHA = 8,
1068 SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
1069 SVGA3D_TC_BLENDCURRENTALPHA = 10,
1070 SVGA3D_TC_BLENDFACTORALPHA = 11,
1071 SVGA3D_TC_MODULATE2X = 12,
1072 SVGA3D_TC_MODULATE4X = 13,
1073 SVGA3D_TC_DSDT = 14,
1074 SVGA3D_TC_DOTPRODUCT3 = 15,
1075 SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
1076 SVGA3D_TC_ADDSIGNED2X = 17,
1077 SVGA3D_TC_ADDSMOOTH = 18,
1078 SVGA3D_TC_PREMODULATE = 19,
1079 SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
1080 SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
1081 SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
1082 SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
1083 SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
1084 SVGA3D_TC_MULTIPLYADD = 25,
1085 SVGA3D_TC_LERP = 26,
1086 SVGA3D_TC_MAX
1087} SVGA3dTextureCombiner;
1088
1089#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
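
/*
 * Editor's sketch (not part of the original header): how the macro
 * above might be used to test combiner support. tcCaps is assumed to
 * be a capability bitmask obtained from a device capability query;
 * note that SVGA3D_TC_INVALID (0) maps to no bit and so always reads
 * as unsupported.
 */
static inline uint32
svga3dCombinerSupported(uint32 tcCaps, SVGA3dTextureCombiner op)
{
   return (tcCaps & SVGA3D_TC_CAP_BIT(op)) != 0;
}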
1090
1091typedef enum {
1092 SVGA3D_TEX_ADDRESS_INVALID = 0,
1093 SVGA3D_TEX_ADDRESS_MIN = 1,
1094 SVGA3D_TEX_ADDRESS_WRAP = 1,
1095 SVGA3D_TEX_ADDRESS_MIRROR = 2,
1096 SVGA3D_TEX_ADDRESS_CLAMP = 3,
1097 SVGA3D_TEX_ADDRESS_BORDER = 4,
1098 SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
1099 SVGA3D_TEX_ADDRESS_EDGE = 6,
1100 SVGA3D_TEX_ADDRESS_MAX
1101} SVGA3dTextureAddress;
1102
1103/*
1104 * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
1105 * disabled, and the rasterizer should use the magnification filter instead.
1106 */
1107typedef enum {
1108 SVGA3D_TEX_FILTER_NONE = 0,
1109 SVGA3D_TEX_FILTER_MIN = 0,
1110 SVGA3D_TEX_FILTER_NEAREST = 1,
1111 SVGA3D_TEX_FILTER_LINEAR = 2,
1112 SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
1113 SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */
1114 SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */
1115 SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */
1116 SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */
1117 SVGA3D_TEX_FILTER_MAX
1118} SVGA3dTextureFilter;
1119
1120typedef enum {
1121 SVGA3D_TEX_TRANSFORM_OFF = 0,
1122 SVGA3D_TEX_TRANSFORM_S = (1 << 0),
1123 SVGA3D_TEX_TRANSFORM_T = (1 << 1),
1124 SVGA3D_TEX_TRANSFORM_R = (1 << 2),
1125 SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
1126 SVGA3D_TEX_PROJECTED = (1 << 15),
1127} SVGA3dTexTransformFlags;
1128
1129typedef enum {
1130 SVGA3D_TEXCOORD_GEN_OFF = 0,
1131 SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
1132 SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
1133 SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
1134 SVGA3D_TEXCOORD_GEN_SPHERE = 4,
1135 SVGA3D_TEXCOORD_GEN_MAX
1136} SVGA3dTextureCoordGen;
1137
1138/*
1139 * Texture argument constants for texture combiner
1140 */
1141typedef enum {
1142 SVGA3D_TA_INVALID = 0,
1143 SVGA3D_TA_TFACTOR = 1,
1144 SVGA3D_TA_PREVIOUS = 2,
1145 SVGA3D_TA_DIFFUSE = 3,
1146 SVGA3D_TA_TEXTURE = 4,
1147 SVGA3D_TA_SPECULAR = 5,
1148 SVGA3D_TA_CONSTANT = 6,
1149 SVGA3D_TA_MAX
1150} SVGA3dTextureArgData;
1151
1152#define SVGA3D_TM_MASK_LEN 4
1153
1154/* Modifiers for texture argument constants defined above. */
1155typedef enum {
1156 SVGA3D_TM_NONE = 0,
1157 SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
1158 SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
1159} SVGA3dTextureArgModifier;
1160
1161/*
1162 * Vertex declarations
1163 *
1164 * Notes:
1165 *
1166 * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
1167 * draw with any POSITIONT vertex arrays, the programmable vertex
1168 * pipeline will be implicitly disabled. Drawing will take place as if
1169 * no vertex shader was bound.
1170 */
1171
1172typedef enum {
1173 SVGA3D_DECLUSAGE_POSITION = 0,
1174 SVGA3D_DECLUSAGE_BLENDWEIGHT,
1175 SVGA3D_DECLUSAGE_BLENDINDICES,
1176 SVGA3D_DECLUSAGE_NORMAL,
1177 SVGA3D_DECLUSAGE_PSIZE,
1178 SVGA3D_DECLUSAGE_TEXCOORD,
1179 SVGA3D_DECLUSAGE_TANGENT,
1180 SVGA3D_DECLUSAGE_BINORMAL,
1181 SVGA3D_DECLUSAGE_TESSFACTOR,
1182 SVGA3D_DECLUSAGE_POSITIONT,
1183 SVGA3D_DECLUSAGE_COLOR,
1184 SVGA3D_DECLUSAGE_FOG,
1185 SVGA3D_DECLUSAGE_DEPTH,
1186 SVGA3D_DECLUSAGE_SAMPLE,
1187 SVGA3D_DECLUSAGE_MAX
1188} SVGA3dDeclUsage;
1189
1190typedef enum {
1191 SVGA3D_DECLMETHOD_DEFAULT = 0,
1192 SVGA3D_DECLMETHOD_PARTIALU,
1193 SVGA3D_DECLMETHOD_PARTIALV,
1194 SVGA3D_DECLMETHOD_CROSSUV, /* Normal */
1195 SVGA3D_DECLMETHOD_UV,
1196 SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */
1197 SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement */
1198 /* map */
1199} SVGA3dDeclMethod;
1200
1201typedef enum {
1202 SVGA3D_DECLTYPE_FLOAT1 = 0,
1203 SVGA3D_DECLTYPE_FLOAT2 = 1,
1204 SVGA3D_DECLTYPE_FLOAT3 = 2,
1205 SVGA3D_DECLTYPE_FLOAT4 = 3,
1206 SVGA3D_DECLTYPE_D3DCOLOR = 4,
1207 SVGA3D_DECLTYPE_UBYTE4 = 5,
1208 SVGA3D_DECLTYPE_SHORT2 = 6,
1209 SVGA3D_DECLTYPE_SHORT4 = 7,
1210 SVGA3D_DECLTYPE_UBYTE4N = 8,
1211 SVGA3D_DECLTYPE_SHORT2N = 9,
1212 SVGA3D_DECLTYPE_SHORT4N = 10,
1213 SVGA3D_DECLTYPE_USHORT2N = 11,
1214 SVGA3D_DECLTYPE_USHORT4N = 12,
1215 SVGA3D_DECLTYPE_UDEC3 = 13,
1216 SVGA3D_DECLTYPE_DEC3N = 14,
1217 SVGA3D_DECLTYPE_FLOAT16_2 = 15,
1218 SVGA3D_DECLTYPE_FLOAT16_4 = 16,
1219 SVGA3D_DECLTYPE_MAX,
1220} SVGA3dDeclType;
1221
1222/*
1223 * This structure is used for the divisor for geometry instancing;
1224 * it's a direct translation of the Direct3D equivalent.
1225 */
1226typedef union {
1227 struct {
1228 /*
1229 * For index data, this number represents the number of instances to draw.
1230 * For instance data, this number represents the number of
 1231 * instances/vertex in this stream.
1232 */
1233 uint32 count : 30;
1234
1235 /*
1236 * This is 1 if this is supposed to be the data that is repeated for
1237 * every instance.
1238 */
1239 uint32 indexedData : 1;
1240
1241 /*
1242 * This is 1 if this is supposed to be the per-instance data.
1243 */
1244 uint32 instanceData : 1;
1245 };
1246
1247 uint32 value;
1248} SVGA3dVertexDivisor;
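
/*
 * Editor's sketch (not part of the original header): filling in the
 * divisor union above for the indexed stream of an instanced draw.
 * Field semantics follow the comments above; the helper name is
 * illustrative only.
 */
static inline SVGA3dVertexDivisor
svga3dIndexedDivisor(uint32 numInstances)
{
   SVGA3dVertexDivisor div;

   div.value = 0;                /* clear all bits first */
   div.count = numInstances;     /* number of instances to draw */
   div.indexedData = 1;          /* this stream is repeated per instance */
   return div;
}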
1249
1250typedef enum {
1251 /*
1252 * SVGA3D_PRIMITIVE_INVALID is a valid primitive type.
1253 *
1254 * List MIN second so debuggers will think INVALID is
1255 * the correct name.
1256 */
1257 SVGA3D_PRIMITIVE_INVALID = 0,
1258 SVGA3D_PRIMITIVE_MIN = 0,
1259 SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
1260 SVGA3D_PRIMITIVE_POINTLIST = 2,
1261 SVGA3D_PRIMITIVE_LINELIST = 3,
1262 SVGA3D_PRIMITIVE_LINESTRIP = 4,
1263 SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
1264 SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
1265 SVGA3D_PRIMITIVE_LINELIST_ADJ = 7,
1266 SVGA3D_PRIMITIVE_PREDX_MAX = 7,
1267 SVGA3D_PRIMITIVE_LINESTRIP_ADJ = 8,
1268 SVGA3D_PRIMITIVE_TRIANGLELIST_ADJ = 9,
1269 SVGA3D_PRIMITIVE_TRIANGLESTRIP_ADJ = 10,
1270 SVGA3D_PRIMITIVE_MAX
1271} SVGA3dPrimitiveType;
1272
1273typedef enum {
1274 SVGA3D_COORDINATE_INVALID = 0,
1275 SVGA3D_COORDINATE_LEFTHANDED = 1,
1276 SVGA3D_COORDINATE_RIGHTHANDED = 2,
1277 SVGA3D_COORDINATE_MAX
1278} SVGA3dCoordinateType;
1279
1280typedef enum {
1281 SVGA3D_TRANSFORM_INVALID = 0,
1282 SVGA3D_TRANSFORM_WORLD = 1,
1283 SVGA3D_TRANSFORM_MIN = 1,
1284 SVGA3D_TRANSFORM_VIEW = 2,
1285 SVGA3D_TRANSFORM_PROJECTION = 3,
1286 SVGA3D_TRANSFORM_TEXTURE0 = 4,
1287 SVGA3D_TRANSFORM_TEXTURE1 = 5,
1288 SVGA3D_TRANSFORM_TEXTURE2 = 6,
1289 SVGA3D_TRANSFORM_TEXTURE3 = 7,
1290 SVGA3D_TRANSFORM_TEXTURE4 = 8,
1291 SVGA3D_TRANSFORM_TEXTURE5 = 9,
1292 SVGA3D_TRANSFORM_TEXTURE6 = 10,
1293 SVGA3D_TRANSFORM_TEXTURE7 = 11,
1294 SVGA3D_TRANSFORM_WORLD1 = 12,
1295 SVGA3D_TRANSFORM_WORLD2 = 13,
1296 SVGA3D_TRANSFORM_WORLD3 = 14,
1297 SVGA3D_TRANSFORM_MAX
1298} SVGA3dTransformType;
1299
1300typedef enum {
1301 SVGA3D_LIGHTTYPE_INVALID = 0,
1302 SVGA3D_LIGHTTYPE_MIN = 1,
1303 SVGA3D_LIGHTTYPE_POINT = 1,
1304 SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
1305 SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
1306 SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
1307 SVGA3D_LIGHTTYPE_MAX
1308} SVGA3dLightType;
1309
1310typedef enum {
1311 SVGA3D_CUBEFACE_POSX = 0,
1312 SVGA3D_CUBEFACE_NEGX = 1,
1313 SVGA3D_CUBEFACE_POSY = 2,
1314 SVGA3D_CUBEFACE_NEGY = 3,
1315 SVGA3D_CUBEFACE_POSZ = 4,
1316 SVGA3D_CUBEFACE_NEGZ = 5,
1317} SVGA3dCubeFace;
1318
1319typedef enum {
1320 SVGA3D_SHADERTYPE_INVALID = 0,
1321 SVGA3D_SHADERTYPE_MIN = 1,
1322 SVGA3D_SHADERTYPE_VS = 1,
1323 SVGA3D_SHADERTYPE_PS = 2,
1324 SVGA3D_SHADERTYPE_PREDX_MAX = 3,
1325 SVGA3D_SHADERTYPE_GS = 3,
1326 SVGA3D_SHADERTYPE_DX10_MAX = 4,
1327 SVGA3D_SHADERTYPE_HS = 4,
1328 SVGA3D_SHADERTYPE_DS = 5,
1329 SVGA3D_SHADERTYPE_CS = 6,
1330 SVGA3D_SHADERTYPE_MAX = 7
1331} SVGA3dShaderType;
1332
1333#define SVGA3D_NUM_SHADERTYPE_PREDX \
1334 (SVGA3D_SHADERTYPE_PREDX_MAX - SVGA3D_SHADERTYPE_MIN)
1335
1336#define SVGA3D_NUM_SHADERTYPE_DX10 \
1337 (SVGA3D_SHADERTYPE_DX10_MAX - SVGA3D_SHADERTYPE_MIN)
1338
1339#define SVGA3D_NUM_SHADERTYPE \
1340 (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
1341
1342typedef enum {
1343 SVGA3D_CONST_TYPE_MIN = 0,
1344 SVGA3D_CONST_TYPE_FLOAT = 0,
1345 SVGA3D_CONST_TYPE_INT = 1,
1346 SVGA3D_CONST_TYPE_BOOL = 2,
1347 SVGA3D_CONST_TYPE_MAX = 3,
1348} SVGA3dShaderConstType;
1349
1350/*
1351 * Register limits for shader consts.
1352 */
1353#define SVGA3D_CONSTREG_MAX 256
1354#define SVGA3D_CONSTINTREG_MAX 16
1355#define SVGA3D_CONSTBOOLREG_MAX 16
1356
1357typedef enum {
1358 SVGA3D_STRETCH_BLT_POINT = 0,
1359 SVGA3D_STRETCH_BLT_LINEAR = 1,
1360 SVGA3D_STRETCH_BLT_MAX
1361} SVGA3dStretchBltMode;
1362
1363typedef enum {
1364 SVGA3D_QUERYTYPE_INVALID = ((uint8)-1),
1365 SVGA3D_QUERYTYPE_MIN = 0,
1366 SVGA3D_QUERYTYPE_OCCLUSION = 0,
1367 SVGA3D_QUERYTYPE_TIMESTAMP = 1,
1368 SVGA3D_QUERYTYPE_TIMESTAMPDISJOINT = 2,
1369 SVGA3D_QUERYTYPE_PIPELINESTATS = 3,
1370 SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE = 4,
1371 SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS = 5,
1372 SVGA3D_QUERYTYPE_STREAMOVERFLOWPREDICATE = 6,
1373 SVGA3D_QUERYTYPE_OCCLUSION64 = 7,
1374 SVGA3D_QUERYTYPE_EVENT = 8,
1375 SVGA3D_QUERYTYPE_DX10_MAX = 9,
1376 SVGA3D_QUERYTYPE_SOSTATS_STREAM0 = 9,
1377 SVGA3D_QUERYTYPE_SOSTATS_STREAM1 = 10,
1378 SVGA3D_QUERYTYPE_SOSTATS_STREAM2 = 11,
1379 SVGA3D_QUERYTYPE_SOSTATS_STREAM3 = 12,
1380 SVGA3D_QUERYTYPE_SOP_STREAM0 = 13,
1381 SVGA3D_QUERYTYPE_SOP_STREAM1 = 14,
1382 SVGA3D_QUERYTYPE_SOP_STREAM2 = 15,
1383 SVGA3D_QUERYTYPE_SOP_STREAM3 = 16,
1384 SVGA3D_QUERYTYPE_MAX
1385} SVGA3dQueryType;
1386
1387typedef uint8 SVGA3dQueryTypeUint8;
1388
1389#define SVGA3D_NUM_QUERYTYPE (SVGA3D_QUERYTYPE_MAX - SVGA3D_QUERYTYPE_MIN)
1390
1391/*
1392 * This is the maximum number of queries per context that can be active
1393 * simultaneously between a beginQuery and endQuery.
1394 */
1395#define SVGA3D_MAX_QUERY 64
1396
1397/*
1398 * Query result buffer formats
1399 */
1400typedef
1401#include "vmware_pack_begin.h"
1402struct {
1403 uint32 samplesRendered;
1404}
1405#include "vmware_pack_end.h"
1406SVGADXOcclusionQueryResult;
1407
1408typedef
1409#include "vmware_pack_begin.h"
1410struct {
1411 uint32 passed;
1412}
1413#include "vmware_pack_end.h"
1414SVGADXEventQueryResult;
1415
1416typedef
1417#include "vmware_pack_begin.h"
1418struct {
1419 uint64 timestamp;
1420}
1421#include "vmware_pack_end.h"
1422SVGADXTimestampQueryResult;
1423
1424typedef
1425#include "vmware_pack_begin.h"
1426struct {
1427 uint64 realFrequency;
1428 uint32 disjoint;
1429}
1430#include "vmware_pack_end.h"
1431SVGADXTimestampDisjointQueryResult;
1432
1433typedef
1434#include "vmware_pack_begin.h"
1435struct {
1436 uint64 inputAssemblyVertices;
1437 uint64 inputAssemblyPrimitives;
1438 uint64 vertexShaderInvocations;
1439 uint64 geometryShaderInvocations;
1440 uint64 geometryShaderPrimitives;
1441 uint64 clipperInvocations;
1442 uint64 clipperPrimitives;
1443 uint64 pixelShaderInvocations;
1444 uint64 hullShaderInvocations;
1445 uint64 domainShaderInvocations;
1446 uint64 computeShaderInvocations;
1447}
1448#include "vmware_pack_end.h"
1449SVGADXPipelineStatisticsQueryResult;
1450
1451typedef
1452#include "vmware_pack_begin.h"
1453struct {
1454 uint32 anySamplesRendered;
1455}
1456#include "vmware_pack_end.h"
1457SVGADXOcclusionPredicateQueryResult;
1458
1459typedef
1460#include "vmware_pack_begin.h"
1461struct {
1462 uint64 numPrimitivesWritten;
1463 uint64 numPrimitivesRequired;
1464}
1465#include "vmware_pack_end.h"
1466SVGADXStreamOutStatisticsQueryResult;
1467
1468typedef
1469#include "vmware_pack_begin.h"
1470struct {
1471 uint32 overflowed;
1472}
1473#include "vmware_pack_end.h"
1474SVGADXStreamOutPredicateQueryResult;
1475
1476typedef
1477#include "vmware_pack_begin.h"
1478struct {
1479 uint64 samplesRendered;
1480}
1481#include "vmware_pack_end.h"
1482SVGADXOcclusion64QueryResult;
1483
1484/*
1485 * SVGADXQueryResultUnion is not intended for use in the protocol, but is
1486 * very helpful when working with queries generically.
1487 */
1488typedef
1489#include "vmware_pack_begin.h"
1490union SVGADXQueryResultUnion {
1491 SVGADXOcclusionQueryResult occ;
1492 SVGADXEventQueryResult event;
1493 SVGADXTimestampQueryResult ts;
1494 SVGADXTimestampDisjointQueryResult tsDisjoint;
1495 SVGADXPipelineStatisticsQueryResult pipelineStats;
1496 SVGADXOcclusionPredicateQueryResult occPred;
1497 SVGADXStreamOutStatisticsQueryResult soStats;
1498 SVGADXStreamOutPredicateQueryResult soPred;
1499 SVGADXOcclusion64QueryResult occ64;
1500}
1501#include "vmware_pack_end.h"
1502SVGADXQueryResultUnion;
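
/*
 * Editor's sketch (not part of the original header): reading a
 * completed occlusion result generically through the union above.
 * The caller is assumed to know the query type it issued.
 */
static inline uint64
svga3dOcclusionSamples(const SVGADXQueryResultUnion *result,
                       SVGA3dQueryType type)
{
   if (type == SVGA3D_QUERYTYPE_OCCLUSION64)
      return result->occ64.samplesRendered;   /* 64-bit counter */
   return result->occ.samplesRendered;        /* 32-bit counter */
}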
1503
1504
1505typedef enum {
1506 SVGA3D_QUERYSTATE_PENDING = 0, /* Query is not finished yet */
1507 SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully */
1508 SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully */
1509 SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (guest only) */
1510} SVGA3dQueryState;
1511
1512typedef enum {
1513 SVGA3D_WRITE_HOST_VRAM = 1,
1514 SVGA3D_READ_HOST_VRAM = 2,
1515} SVGA3dTransferType;
1516
1517typedef enum {
1518 SVGA3D_LOGICOP_INVALID = 0,
1519 SVGA3D_LOGICOP_MIN = 1,
1520 SVGA3D_LOGICOP_COPY = 1,
1521 SVGA3D_LOGICOP_NOT = 2,
1522 SVGA3D_LOGICOP_AND = 3,
1523 SVGA3D_LOGICOP_OR = 4,
1524 SVGA3D_LOGICOP_XOR = 5,
1525 SVGA3D_LOGICOP_NXOR = 6,
1526 SVGA3D_LOGICOP_ROP3MIN = 30, /* 7-29 are reserved for future logic ops. */
1527 SVGA3D_LOGICOP_ROP3MAX = (SVGA3D_LOGICOP_ROP3MIN + 255),
1528 SVGA3D_LOGICOP_MAX = (SVGA3D_LOGICOP_ROP3MAX + 1),
1529} SVGA3dLogicOp;
1530
1531typedef
1532#include "vmware_pack_begin.h"
1533struct {
1534 union {
1535 struct {
1536 uint16 function; /* SVGA3dFogFunction */
1537 uint8 type; /* SVGA3dFogType */
1538 uint8 base; /* SVGA3dFogBase */
1539 };
1540 uint32 uintValue;
1541 };
1542}
1543#include "vmware_pack_end.h"
1544SVGA3dFogMode;
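
/*
 * Editor's sketch (not part of the original header): packing a linear,
 * per-pixel, depth-based fog configuration into the 32-bit register
 * value via the union above.
 */
static inline uint32
svga3dLinearFogMode(void)
{
   SVGA3dFogMode mode;

   mode.uintValue = 0;
   mode.function  = SVGA3D_FOGFUNC_LINEAR;      /* uint16 field */
   mode.type      = SVGA3D_FOGTYPE_PIXEL;       /* uint8 field */
   mode.base      = SVGA3D_FOGBASE_DEPTHBASED;  /* uint8 field */
   return mode.uintValue;
}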
1545
1546/*
1547 * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
1548 * is a surface ID as well as face/mipmap indices.
1549 */
1550
1551typedef
1552#include "vmware_pack_begin.h"
1553struct SVGA3dSurfaceImageId {
1554 uint32 sid;
1555 uint32 face;
1556 uint32 mipmap;
1557}
1558#include "vmware_pack_end.h"
1559SVGA3dSurfaceImageId;
1560
1561typedef
1562#include "vmware_pack_begin.h"
1563struct {
1564 uint32 width;
1565 uint32 height;
1566 uint32 depth;
1567}
1568#include "vmware_pack_end.h"
1569SVGA3dSize;
1570
1571/*
1572 * Guest-backed objects definitions.
1573 */
1574typedef enum {
1575 SVGA_OTABLE_MOB = 0,
1576 SVGA_OTABLE_MIN = 0,
1577 SVGA_OTABLE_SURFACE = 1,
1578 SVGA_OTABLE_CONTEXT = 2,
1579 SVGA_OTABLE_SHADER = 3,
1580 SVGA_OTABLE_SCREENTARGET = 4,
1581
1582 SVGA_OTABLE_DX9_MAX = 5,
1583
1584 SVGA_OTABLE_DXCONTEXT = 5,
1585 SVGA_OTABLE_MAX = 6
1586} SVGAOTableType;
1587
1588/*
1589 * Deprecated.
1590 */
1591#define SVGA_OTABLE_COUNT 4
1592
1593typedef enum {
1594 SVGA_COTABLE_MIN = 0,
1595 SVGA_COTABLE_RTVIEW = 0,
1596 SVGA_COTABLE_DSVIEW = 1,
1597 SVGA_COTABLE_SRVIEW = 2,
1598 SVGA_COTABLE_ELEMENTLAYOUT = 3,
1599 SVGA_COTABLE_BLENDSTATE = 4,
1600 SVGA_COTABLE_DEPTHSTENCIL = 5,
1601 SVGA_COTABLE_RASTERIZERSTATE = 6,
1602 SVGA_COTABLE_SAMPLER = 7,
1603 SVGA_COTABLE_STREAMOUTPUT = 8,
1604 SVGA_COTABLE_DXQUERY = 9,
1605 SVGA_COTABLE_DXSHADER = 10,
1606 SVGA_COTABLE_DX10_MAX = 11,
1607 SVGA_COTABLE_UAVIEW = 11,
1608 SVGA_COTABLE_MAX
1609} SVGACOTableType;
1610
1611/*
1612 * The largest size (number of entries) allowed in a COTable.
1613 */
1614#define SVGA_COTABLE_MAX_IDS (MAX_UINT16 - 2)
1615
1616typedef enum SVGAMobFormat {
1617 SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
1618 SVGA3D_MOBFMT_PTDEPTH_0 = 0,
1619 SVGA3D_MOBFMT_MIN = 0,
1620 SVGA3D_MOBFMT_PTDEPTH_1 = 1,
1621 SVGA3D_MOBFMT_PTDEPTH_2 = 2,
1622 SVGA3D_MOBFMT_RANGE = 3,
1623 SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
1624 SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
1625 SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
1626 SVGA3D_MOBFMT_PREDX_MAX = 7,
1627 SVGA3D_MOBFMT_EMPTY = 7,
1628 SVGA3D_MOBFMT_MAX,
1629} SVGAMobFormat;
1630
1631#define SVGA3D_MOB_EMPTY_BASE 1
1632
1633#endif /* _SVGA3D_TYPES_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
index 8e8d9682e018..884b1d1fb85f 100644
--- a/drivers/gpu/drm/vmwgfx/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
@@ -1,5 +1,5 @@
1/********************************************************** 1/**********************************************************
2 * Copyright 2007-2009 VMware, Inc. All rights reserved. 2 * Copyright 2007-2015 VMware, Inc. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person 4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation 5 * obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
index f38416fcb046..faf6d9b2b891 100644
--- a/drivers/gpu/drm/vmwgfx/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
@@ -1,5 +1,5 @@
1/********************************************************** 1/**********************************************************
2 * Copyright 2007-2009 VMware, Inc. All rights reserved. 2 * Copyright 2007-2015 VMware, Inc. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person 4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation 5 * obtaining a copy of this software and associated documentation
@@ -152,19 +152,17 @@ VMwareVideoGetAttributes(const SVGAOverlayFormat format, /* IN */
152 switch (format) { 152 switch (format) {
153 case VMWARE_FOURCC_YV12: 153 case VMWARE_FOURCC_YV12:
154 *height = (*height + 1) & ~1; 154 *height = (*height + 1) & ~1;
155 *size = (*width + 3) & ~3; 155 *size = (*width) * (*height);
156 156
157 if (pitches) { 157 if (pitches) {
158 pitches[0] = *size; 158 pitches[0] = *width;
159 } 159 }
160 160
161 *size *= *height;
162
163 if (offsets) { 161 if (offsets) {
164 offsets[1] = *size; 162 offsets[1] = *size;
165 } 163 }
166 164
167 tmp = ((*width >> 1) + 3) & ~3; 165 tmp = *width >> 1;
168 166
169 if (pitches) { 167 if (pitches) {
170 pitches[1] = pitches[2] = tmp; 168 pitches[1] = pitches[2] = tmp;
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index e4259c2c1acc..6e0ccb70a700 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -1,5 +1,5 @@
1/********************************************************** 1/**********************************************************
2 * Copyright 1998-2009 VMware, Inc. All rights reserved. 2 * Copyright 1998-2015 VMware, Inc. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person 4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation 5 * obtaining a copy of this software and associated documentation
@@ -31,20 +31,38 @@
31 31
32#ifndef _SVGA_REG_H_ 32#ifndef _SVGA_REG_H_
33#define _SVGA_REG_H_ 33#define _SVGA_REG_H_
34#include <linux/pci_ids.h>
35
36#define INCLUDE_ALLOW_MODULE
37#define INCLUDE_ALLOW_USERLEVEL
38
39#define INCLUDE_ALLOW_VMCORE
40#include "includeCheck.h"
41
42#include "svga_types.h"
34 43
35/* 44/*
36 * PCI device IDs. 45 * SVGA_REG_ENABLE bit definitions.
37 */ 46 */
38#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405 47typedef enum {
48 SVGA_REG_ENABLE_DISABLE = 0,
49 SVGA_REG_ENABLE_ENABLE = (1 << 0),
50 SVGA_REG_ENABLE_HIDE = (1 << 1),
51} SvgaRegEnable;
52
53typedef uint32 SVGAMobId;
39 54
40/* 55/*
41 * SVGA_REG_ENABLE bit definitions. 56 * Arbitrary and meaningless limits. Please ignore these when writing
57 * new drivers.
42 */ 58 */
43#define SVGA_REG_ENABLE_DISABLE 0 59#define SVGA_MAX_WIDTH 2560
44#define SVGA_REG_ENABLE_ENABLE 1 60#define SVGA_MAX_HEIGHT 1600
45#define SVGA_REG_ENABLE_HIDE 2 61
46#define SVGA_REG_ENABLE_ENABLE_HIDE (SVGA_REG_ENABLE_ENABLE |\ 62
47 SVGA_REG_ENABLE_HIDE) 63#define SVGA_MAX_BITS_PER_PIXEL 32
64#define SVGA_MAX_DEPTH 24
65#define SVGA_MAX_DISPLAYS 10
48 66
49/* 67/*
50 * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned 68 * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
@@ -57,14 +75,9 @@
57#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */ 75#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */
58 76
59/* 77/*
60 * The maximum framebuffer size that can be traced for e.g. guests in VESA mode. 78 * The maximum framebuffer size that can be traced for guests unless the
61 * The changeMap in the monitor is proportional to this number. Therefore, we'd 79 * SVGA_CAP_GBOBJECTS is set in SVGA_REG_CAPABILITIES. In that case
62 * like to keep it as small as possible to reduce monitor overhead (using 80 * the full framebuffer can be traced independent of this limit.
63 * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
64 * 4k!).
65 *
66 * NB: For compatibility reasons, this value must be greater than 0xff0000.
67 * See bug 335072.
68 */ 81 */
69#define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000 82#define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000
70 83
@@ -106,6 +119,8 @@
106#define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */ 119#define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */
107#define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */ 120#define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */
108#define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */ 121#define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */
122#define SVGA_IRQFLAG_COMMAND_BUFFER 0x8 /* Command buffer completed */
123#define SVGA_IRQFLAG_ERROR 0x10 /* Error while processing commands */
109 124
110/* 125/*
111 * Registers 126 * Registers
@@ -131,6 +146,7 @@ enum {
131 SVGA_REG_FB_SIZE = 16, 146 SVGA_REG_FB_SIZE = 16,
132 147
133 /* ID 0 implementation only had the above registers, then the palette */ 148 /* ID 0 implementation only had the above registers, then the palette */
149 SVGA_REG_ID_0_TOP = 17,
134 150
135 SVGA_REG_CAPABILITIES = 17, 151 SVGA_REG_CAPABILITIES = 17,
136 SVGA_REG_MEM_START = 18, /* (Deprecated) */ 152 SVGA_REG_MEM_START = 18, /* (Deprecated) */
@@ -171,7 +187,7 @@ enum {
171 SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */ 187 SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */
172 SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */ 188 SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */
173 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ 189 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
174 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ 190 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
175 SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ 191 SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
176 SVGA_REG_CMD_PREPEND_LOW = 53, 192 SVGA_REG_CMD_PREPEND_LOW = 53,
177 SVGA_REG_CMD_PREPEND_HIGH = 54, 193 SVGA_REG_CMD_PREPEND_HIGH = 54,
@@ -182,7 +198,6 @@ enum {
182 198
183 SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ 199 SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
184 /* Next 768 (== 256*3) registers exist for colormap */ 200 /* Next 768 (== 256*3) registers exist for colormap */
185
186 SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS 201 SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
187 /* Base of scratch registers */ 202 /* Base of scratch registers */
188 /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage: 203 /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
@@ -190,7 +205,6 @@ enum {
190 the use of the current SVGA driver. */ 205 the use of the current SVGA driver. */
191}; 206};
192 207
193
194/* 208/*
195 * Guest memory regions (GMRs): 209 * Guest memory regions (GMRs):
196 * 210 *
@@ -288,17 +302,205 @@ enum {
288#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) /* Guest Framebuffer (GFB) */ 302#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) /* Guest Framebuffer (GFB) */
289 303
290typedef 304typedef
305#include "vmware_pack_begin.h"
291struct SVGAGuestMemDescriptor { 306struct SVGAGuestMemDescriptor {
292 uint32 ppn; 307 uint32 ppn;
293 uint32 numPages; 308 uint32 numPages;
294} SVGAGuestMemDescriptor; 309}
310#include "vmware_pack_end.h"
311SVGAGuestMemDescriptor;
295 312
296typedef 313typedef
314#include "vmware_pack_begin.h"
297struct SVGAGuestPtr { 315struct SVGAGuestPtr {
298 uint32 gmrId; 316 uint32 gmrId;
299 uint32 offset; 317 uint32 offset;
300} SVGAGuestPtr; 318}
319#include "vmware_pack_end.h"
320SVGAGuestPtr;
321
322/*
323 * Register based command buffers --
324 *
325 * Provide an SVGA device interface that allows the guest to submit
326 * command buffers to the SVGA device through an SVGA device register.
327 * The metadata for each command buffer is contained in the
328 * SVGACBHeader structure along with the return status codes.
329 *
330 * The SVGA device supports command buffers if
331 * SVGA_CAP_COMMAND_BUFFERS is set in the device caps register. The
332 * fifo must be enabled for command buffers to be submitted.
333 *
 334 * Command buffers are submitted when the guest writes the 64 byte
 335 * aligned physical address into SVGA_REG_COMMAND_LOW and
 336 * SVGA_REG_COMMAND_HIGH. SVGA_REG_COMMAND_HIGH contains the upper 32
 337 * bits of the physical address. SVGA_REG_COMMAND_LOW contains the
 338 * lower 32 bits of the physical address; since command buffer
 339 * headers are required to be 64 byte aligned, the lower 6 bits are
340 * used for the SVGACBContext value. Writing to SVGA_REG_COMMAND_LOW
341 * submits the command buffer to the device and queues it for
342 * execution. The SVGA device supports at least
343 * SVGA_CB_MAX_QUEUED_PER_CONTEXT command buffers that can be queued
 344 * per context; if that limit is reached, the device will write the
 345 * status SVGA_CB_STATUS_QUEUE_FULL to the status field of the command
 346 * buffer header synchronously and will not raise any IRQs.
347 *
348 * It is invalid to submit a command buffer without a valid physical
 349 * address; results are undefined.
350 *
351 * The device guarantees that command buffers of size SVGA_CB_MAX_SIZE
 352 * will be supported. If a larger command buffer is submitted, results
353 * are unspecified and the device will either complete the command
354 * buffer or return an error.
355 *
356 * The device guarantees that any individual command in a command
 357 * buffer can be up to SVGA_CB_MAX_COMMAND_SIZE in size, which is
 358 * enough to fit a 64x64 color-cursor definition. If the command is
 359 * too large, the device is allowed to process the command or return an
360 * error.
361 *
362 * The device context is a special SVGACBContext that allows for
 363 * synchronous register-like accesses with the flexibility of
 364 * commands. There is a different command set defined by
 365 * SVGADeviceContextCmdId. The commands in each command buffer are not
 366 * allowed to straddle physical pages.
367 *
 368 * The offset field, which is available starting with the
 369 * SVGA_CAP_CMD_BUFFERS_2 cap bit, can be set by the guest to bias the
 370 * start of command processing into the buffer. If an error is
 371 * encountered, the errorOffset will still be relative to the specific
 372 * PA, not biased by the offset. When the command buffer is finished,
 373 * the guest should not read the offset field as there is no guarantee
 374 * what it will be set to.
375 */
376
377#define SVGA_CB_MAX_SIZE (512 * 1024) /* 512 KB */
378#define SVGA_CB_MAX_QUEUED_PER_CONTEXT 32
379#define SVGA_CB_MAX_COMMAND_SIZE (32 * 1024) /* 32 KB */
380
381#define SVGA_CB_CONTEXT_MASK 0x3f
382typedef enum {
383 SVGA_CB_CONTEXT_DEVICE = 0x3f,
384 SVGA_CB_CONTEXT_0 = 0x0,
385 SVGA_CB_CONTEXT_MAX = 0x1,
386} SVGACBContext;
387
388
389typedef enum {
390 /*
391 * The guest is supposed to write SVGA_CB_STATUS_NONE to the status
 392 * field before submitting the command buffer header; the host will
393 * change the value when it is done with the command buffer.
394 */
395 SVGA_CB_STATUS_NONE = 0,
396
397 /*
398 * Written by the host when a command buffer completes successfully.
399 * The device raises an IRQ with SVGA_IRQFLAG_COMMAND_BUFFER unless
400 * the SVGA_CB_FLAG_NO_IRQ flag is set.
401 */
402 SVGA_CB_STATUS_COMPLETED = 1,
403
404 /*
405 * Written by the host synchronously with the command buffer
406 * submission to indicate the command buffer was not submitted. No
407 * IRQ is raised.
408 */
409 SVGA_CB_STATUS_QUEUE_FULL = 2,
410
411 /*
412 * Written by the host when an error was detected parsing a command
413 * in the command buffer, errorOffset is written to contain the
414 * offset to the first byte of the failing command. The device
415 * raises the IRQ with both SVGA_IRQFLAG_ERROR and
416 * SVGA_IRQFLAG_COMMAND_BUFFER. Some of the commands may have been
417 * processed.
418 */
419 SVGA_CB_STATUS_COMMAND_ERROR = 3,
420
421 /*
422 * Written by the host if there is an error parsing the command
423 * buffer header. The device raises the IRQ with both
424 * SVGA_IRQFLAG_ERROR and SVGA_IRQFLAG_COMMAND_BUFFER. The device
 425 * did not process any of the command buffer.
426 */
427 SVGA_CB_STATUS_CB_HEADER_ERROR = 4,
301 428
429 /*
430 * Written by the host if the guest requested the host to preempt
431 * the command buffer. The device will not raise any IRQs and the
432 * command buffer was not processed.
433 */
434 SVGA_CB_STATUS_PREEMPTED = 5,
435
436 /*
437 * Written by the host synchronously with the command buffer
 438 * submission to indicate that the command buffer was not submitted
439 * due to an error. No IRQ is raised.
440 */
441 SVGA_CB_STATUS_SUBMISSION_ERROR = 6,
442} SVGACBStatus;
443
444typedef enum {
445 SVGA_CB_FLAG_NONE = 0,
446 SVGA_CB_FLAG_NO_IRQ = 1 << 0,
447 SVGA_CB_FLAG_DX_CONTEXT = 1 << 1,
448 SVGA_CB_FLAG_MOB = 1 << 2,
449} SVGACBFlags;
450
451typedef
452#include "vmware_pack_begin.h"
453struct {
454 volatile SVGACBStatus status;
455 volatile uint32 errorOffset;
456 uint64 id;
457 SVGACBFlags flags;
458 uint32 length;
459 union {
460 PA pa;
461 struct {
462 SVGAMobId mobid;
463 uint32 mobOffset;
464 } mob;
465 } ptr;
466 uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise */
467 uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */
468 uint32 mustBeZero[6];
469}
470#include "vmware_pack_end.h"
471SVGACBHeader;
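
/*
 * Editor's sketch (not part of the original header): submitting a
 * command buffer per the protocol described above. The header is
 * assumed to live in 64 byte aligned guest physical memory with its
 * status already set to SVGA_CB_STATUS_NONE; svga_write_reg() is a
 * hypothetical register accessor standing in for the driver's own.
 */
extern void svga_write_reg(uint32 reg, uint32 value); /* hypothetical */

static inline void
svga_submit_cb(uint64 headerPA, SVGACBContext context)
{
   svga_write_reg(SVGA_REG_COMMAND_HIGH, (uint32)(headerPA >> 32));

   /* The low 6 bits of the aligned address carry the context id;
    * this write is what actually submits the buffer. */
   svga_write_reg(SVGA_REG_COMMAND_LOW,
                  ((uint32)headerPA & ~SVGA_CB_CONTEXT_MASK) | context);
}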
472
473typedef enum {
474 SVGA_DC_CMD_NOP = 0,
475 SVGA_DC_CMD_START_STOP_CONTEXT = 1,
476 SVGA_DC_CMD_PREEMPT = 2,
477 SVGA_DC_CMD_MAX = 3,
478 SVGA_DC_CMD_FORCE_UINT = MAX_UINT32,
479} SVGADeviceContextCmdId;
480
481typedef struct {
482 uint32 enable;
483 SVGACBContext context;
484} SVGADCCmdStartStop;
485
486/*
487 * SVGADCCmdPreempt --
488 *
489 * This command allows the guest to request that all command buffers
 490 * on the specified context that can be preempted are preempted. After
 491 * execution of this command, all command buffers that were preempted
 492 * will already have SVGA_CB_STATUS_PREEMPTED written into the status
 493 * field. The device might still be processing a command buffer,
 494 * assuming execution of it started before the preemption request was
 495 * received. Setting the ignoreIDZero flag to TRUE will cause the
496 * device to not preempt command buffers with the id field in the
497 * command buffer header set to zero.
498 */
499
500typedef struct {
501 SVGACBContext context;
502 uint32 ignoreIDZero;
503} SVGADCCmdPreempt;
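
/*
 * Editor's sketch (not part of the original header): filling in the
 * preempt request described above for context 0, skipping buffers
 * whose header id is zero.
 */
static inline SVGADCCmdPreempt
svgaPreemptContext0(void)
{
   SVGADCCmdPreempt cmd;

   cmd.context = SVGA_CB_CONTEXT_0;
   cmd.ignoreIDZero = 1;   /* do not preempt buffers with id == 0 */
   return cmd;
}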
302 504
303/* 505/*
304 * SVGAGMRImageFormat -- 506 * SVGAGMRImageFormat --
@@ -320,13 +522,12 @@ struct SVGAGuestPtr {
320 * 522 *
321 */ 523 */
322 524
323typedef 525typedef struct SVGAGMRImageFormat {
324struct SVGAGMRImageFormat {
325 union { 526 union {
326 struct { 527 struct {
327 uint32 bitsPerPixel : 8; 528 uint32 bitsPerPixel : 8;
328 uint32 colorDepth : 8; 529 uint32 colorDepth : 8;
329 uint32 reserved : 16; /* Must be zero */ 530 uint32 reserved : 16; /* Must be zero */
330 }; 531 };
331 532
332 uint32 value; 533 uint32 value;
@@ -334,6 +535,7 @@ struct SVGAGMRImageFormat {
334} SVGAGMRImageFormat; 535} SVGAGMRImageFormat;
335 536
336typedef 537typedef
538#include "vmware_pack_begin.h"
337struct SVGAGuestImage { 539struct SVGAGuestImage {
338 SVGAGuestPtr ptr; 540 SVGAGuestPtr ptr;
339 541
@@ -353,7 +555,9 @@ struct SVGAGuestImage {
353 * assuming each row of blocks is tightly packed. 555 * assuming each row of blocks is tightly packed.
354 */ 556 */
355 uint32 pitch; 557 uint32 pitch;
356} SVGAGuestImage; 558}
559#include "vmware_pack_end.h"
560SVGAGuestImage;
357 561
358/* 562/*
359 * SVGAColorBGRX -- 563 * SVGAColorBGRX --
@@ -363,14 +567,13 @@ struct SVGAGuestImage {
363 * GMRFB state. 567 * GMRFB state.
364 */ 568 */
365 569
366typedef 570typedef struct SVGAColorBGRX {
367struct SVGAColorBGRX {
368 union { 571 union {
369 struct { 572 struct {
370 uint32 b : 8; 573 uint32 b : 8;
371 uint32 g : 8; 574 uint32 g : 8;
372 uint32 r : 8; 575 uint32 r : 8;
373 uint32 x : 8; /* Unused */ 576 uint32 x : 8; /* Unused */
374 }; 577 };
375 578
376 uint32 value; 579 uint32 value;
@@ -392,26 +595,49 @@ struct SVGAColorBGRX {
392 */ 595 */
393 596
394typedef 597typedef
395struct SVGASignedRect { 598#include "vmware_pack_begin.h"
599struct {
396 int32 left; 600 int32 left;
397 int32 top; 601 int32 top;
398 int32 right; 602 int32 right;
399 int32 bottom; 603 int32 bottom;
400} SVGASignedRect; 604}
605#include "vmware_pack_end.h"
606SVGASignedRect;
401 607
402typedef 608typedef
403struct SVGASignedPoint { 609#include "vmware_pack_begin.h"
610struct {
404 int32 x; 611 int32 x;
405 int32 y; 612 int32 y;
406} SVGASignedPoint; 613}
614#include "vmware_pack_end.h"
615SVGASignedPoint;
407 616
408 617
409/* 618/*
410 * Capabilities 619 * SVGA Device Capabilities
620 *
621 * Note the holes in the bitfield. Missing bits have been deprecated,
622 * and must not be reused. Those capabilities will never be reported
623 * by new versions of the SVGA device.
624 *
625 * XXX: Add longer descriptions for each capability, including a list
626 * of the new features that each capability provides.
411 * 627 *
412 * Note the holes in the bitfield. Missing bits have been deprecated, 628 * SVGA_CAP_IRQMASK --
413 * and must not be reused. Those capabilities will never be reported 629 * Provides device interrupts. Adds device register SVGA_REG_IRQMASK
414 * by new versions of the SVGA device. 630 * to set interrupt mask and direct I/O port SVGA_IRQSTATUS_PORT to
631 * set/clear pending interrupts.
632 *
633 * SVGA_CAP_GMR --
634 * Provides synchronous mapping of guest memory regions (GMR).
635 * Adds device registers SVGA_REG_GMR_ID, SVGA_REG_GMR_DESCRIPTOR,
636 * SVGA_REG_GMR_MAX_IDS, and SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH.
637 *
638 * SVGA_CAP_TRACES --
639 * Allows framebuffer trace-based updates even when FIFO is enabled.
640 * Adds device register SVGA_REG_TRACES.
415 * 641 *
416 * SVGA_CAP_GMR2 -- 642 * SVGA_CAP_GMR2 --
417 * Provides asynchronous commands to define and remap guest memory 643 * Provides asynchronous commands to define and remap guest memory
@@ -421,21 +647,39 @@ struct SVGASignedPoint {
421 * SVGA_CAP_SCREEN_OBJECT_2 -- 647 * SVGA_CAP_SCREEN_OBJECT_2 --
422 * Allow screen object support, and require backing stores from the 648 * Allow screen object support, and require backing stores from the
423 * guest for each screen object. 649 * guest for each screen object.
650 *
651 * SVGA_CAP_COMMAND_BUFFERS --
652 * Enable register based command buffer submission.
653 *
654 * SVGA_CAP_DEAD1 --
655 * This cap was incorrectly used by old drivers and should not be
656 * reused.
657 *
658 * SVGA_CAP_CMD_BUFFERS_2 --
 659 * Enable support for the prepend command buffer submission
 660 * registers, SVGA_REG_CMD_PREPEND_LOW and
 661 * SVGA_REG_CMD_PREPEND_HIGH.
662 *
663 * SVGA_CAP_GBOBJECTS --
664 * Enable guest-backed objects and surfaces.
665 *
666 * SVGA_CAP_CMD_BUFFERS_3 --
667 * Enable support for command buffers in a mob.
424 */ 668 */
425 669
426#define SVGA_CAP_NONE 0x00000000 670#define SVGA_CAP_NONE 0x00000000
427#define SVGA_CAP_RECT_COPY 0x00000002 671#define SVGA_CAP_RECT_COPY 0x00000002
428#define SVGA_CAP_CURSOR 0x00000020 672#define SVGA_CAP_CURSOR 0x00000020
429#define SVGA_CAP_CURSOR_BYPASS 0x00000040 /* Legacy (Use Cursor Bypass 3 instead) */ 673#define SVGA_CAP_CURSOR_BYPASS 0x00000040
430#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 /* Legacy (Use Cursor Bypass 3 instead) */ 674#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080
431#define SVGA_CAP_8BIT_EMULATION 0x00000100 675#define SVGA_CAP_8BIT_EMULATION 0x00000100
432#define SVGA_CAP_ALPHA_CURSOR 0x00000200 676#define SVGA_CAP_ALPHA_CURSOR 0x00000200
433#define SVGA_CAP_3D 0x00004000 677#define SVGA_CAP_3D 0x00004000
434#define SVGA_CAP_EXTENDED_FIFO 0x00008000 678#define SVGA_CAP_EXTENDED_FIFO 0x00008000
435#define SVGA_CAP_MULTIMON 0x00010000 /* Legacy multi-monitor support */ 679#define SVGA_CAP_MULTIMON 0x00010000
436#define SVGA_CAP_PITCHLOCK 0x00020000 680#define SVGA_CAP_PITCHLOCK 0x00020000
437#define SVGA_CAP_IRQMASK 0x00040000 681#define SVGA_CAP_IRQMASK 0x00040000
438#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 /* Legacy multi-monitor support */ 682#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000
439#define SVGA_CAP_GMR 0x00100000 683#define SVGA_CAP_GMR 0x00100000
440#define SVGA_CAP_TRACES 0x00200000 684#define SVGA_CAP_TRACES 0x00200000
441#define SVGA_CAP_GMR2 0x00400000 685#define SVGA_CAP_GMR2 0x00400000
@@ -444,6 +688,33 @@ struct SVGASignedPoint {
444#define SVGA_CAP_DEAD1 0x02000000 688#define SVGA_CAP_DEAD1 0x02000000
445#define SVGA_CAP_CMD_BUFFERS_2 0x04000000 689#define SVGA_CAP_CMD_BUFFERS_2 0x04000000
446#define SVGA_CAP_GBOBJECTS 0x08000000 690#define SVGA_CAP_GBOBJECTS 0x08000000
691#define SVGA_CAP_DX 0x10000000
692
693#define SVGA_CAP_CMD_RESERVED 0x80000000
694
695
696/*
697 * The Guest can optionally read some SVGA device capabilities through
698 * the backdoor with command BDOOR_CMD_GET_SVGA_CAPABILITIES before
699 * the SVGA device is initialized. The type of capability the guest
700 * is requesting from the SVGABackdoorCapType enum should be placed in
 701 * the upper 16 bits of the backdoor command id (ECX). On success,
 702 * the value of EBX will be set to BDOOR_MAGIC and EAX will be set to
 703 * the requested capability. If the command is not supported then EBX
 704 * will be left unchanged and EAX will be set to -1. Because it is
 705 * possible that -1 is the value of the requested cap, the correct way
 706 * to check if the command was successful is to check if EBX was changed
 707 * to BDOOR_MAGIC, making sure to initialize the register to something
708 * else first.
709 */
710
711typedef enum {
712 SVGABackdoorCapDeviceCaps = 0,
713 SVGABackdoorCapFifoCaps = 1,
714 SVGABackdoorCap3dHWVersion = 2,
715 SVGABackdoorCapMax = 3,
716} SVGABackdoorCapType;
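
/*
 * Editor's sketch (not part of the original header): probing a
 * capability through the backdoor as described above. backdoor_call()
 * is a hypothetical helper wrapping the guest's backdoor I/O
 * mechanism; BDOOR_MAGIC and BDOOR_CMD_GET_SVGA_CAPABILITIES come
 * from the backdoor headers and are not defined here.
 */
extern void backdoor_call(uint32 *eax, uint32 *ebx,
                          uint32 *ecx, uint32 *edx); /* hypothetical */

static inline uint32
svga_backdoor_cap(SVGABackdoorCapType type, uint32 *capOut)
{
   uint32 eax = 0, ebx = 0, edx = 0;   /* EBX deliberately != magic */
   uint32 ecx = BDOOR_CMD_GET_SVGA_CAPABILITIES | ((uint32)type << 16);

   backdoor_call(&eax, &ebx, &ecx, &edx);
   if (ebx != BDOOR_MAGIC)
      return 0;      /* command unsupported; EAX == -1 is ambiguous */
   *capOut = eax;
   return 1;
}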
717
447 718
448/* 719/*
449 * FIFO register indices. 720 * FIFO register indices.
@@ -883,7 +1154,8 @@ enum {
883 SVGA_VIDEO_PITCH_2, 1154 SVGA_VIDEO_PITCH_2,
884 SVGA_VIDEO_PITCH_3, 1155 SVGA_VIDEO_PITCH_3,
885 SVGA_VIDEO_DATA_GMRID, /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */ 1156 SVGA_VIDEO_DATA_GMRID, /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
886 SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */ 1157 SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords */
1158 /* (SVGA_ID_INVALID) */
887 SVGA_VIDEO_NUM_REGS 1159 SVGA_VIDEO_NUM_REGS
888}; 1160};
889 1161
@@ -896,7 +1168,9 @@ enum {
896 * video frame to be displayed. 1168 * video frame to be displayed.
897 */ 1169 */
898 1170
899typedef struct SVGAOverlayUnit { 1171typedef
1172#include "vmware_pack_begin.h"
1173struct SVGAOverlayUnit {
900 uint32 enabled; 1174 uint32 enabled;
901 uint32 flags; 1175 uint32 flags;
902 uint32 dataOffset; 1176 uint32 dataOffset;
@@ -916,7 +1190,27 @@ typedef struct SVGAOverlayUnit {
916 uint32 pitches[3]; 1190 uint32 pitches[3];
917 uint32 dataGMRId; 1191 uint32 dataGMRId;
918 uint32 dstScreenId; 1192 uint32 dstScreenId;
919} SVGAOverlayUnit; 1193}
1194#include "vmware_pack_end.h"
1195SVGAOverlayUnit;
1196
1197
1198/*
1199 * Guest display topology
1200 *
1201 * XXX: This structure is not part of the SVGA device's interface, and
1202 * doesn't really belong here.
1203 */
1204#define SVGA_INVALID_DISPLAY_ID ((uint32)-1)
1205
1206typedef struct SVGADisplayTopology {
1207 uint16 displayId;
1208 uint16 isPrimary;
1209 uint32 width;
1210 uint32 height;
1211 uint32 positionX;
1212 uint32 positionY;
1213} SVGADisplayTopology;
920 1214
921 1215
922/* 1216/*
@@ -951,10 +1245,10 @@ typedef struct SVGAOverlayUnit {
951 * value of zero means no cloning should happen. 1245 * value of zero means no cloning should happen.
952 */ 1246 */
953 1247
954#define SVGA_SCREEN_MUST_BE_SET (1 << 0) /* Must be set or results undefined */ 1248#define SVGA_SCREEN_MUST_BE_SET (1 << 0)
955#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */ 1249#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
956#define SVGA_SCREEN_IS_PRIMARY (1 << 1) /* Guest considers this screen to be 'primary' */ 1250#define SVGA_SCREEN_IS_PRIMARY (1 << 1)
957#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */ 1251#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2)
958 1252
959/* 1253/*
960 * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When the screen is 1254 * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When the screen is
@@ -977,7 +1271,8 @@ typedef struct SVGAOverlayUnit {
977#define SVGA_SCREEN_BLANKING (1 << 4) 1271#define SVGA_SCREEN_BLANKING (1 << 4)
978 1272
979typedef 1273typedef
980struct SVGAScreenObject { 1274#include "vmware_pack_begin.h"
1275struct {
981 uint32 structSize; /* sizeof(SVGAScreenObject) */ 1276 uint32 structSize; /* sizeof(SVGAScreenObject) */
982 uint32 id; 1277 uint32 id;
983 uint32 flags; 1278 uint32 flags;
@@ -995,8 +1290,17 @@ struct SVGAScreenObject {
995 * with SVGA_FIFO_CAP_SCREEN_OBJECT. 1290 * with SVGA_FIFO_CAP_SCREEN_OBJECT.
996 */ 1291 */
997 SVGAGuestImage backingStore; 1292 SVGAGuestImage backingStore;
1293
1294 /*
1295 * The cloneCount field is treated as a hint from the guest that
1296 * the user wants this display to be cloned, cloneCount times.
1297 *
1298 * A value of zero means no cloning should happen.
1299 */
998 uint32 cloneCount; 1300 uint32 cloneCount;
999} SVGAScreenObject; 1301}
1302#include "vmware_pack_end.h"
1303SVGAScreenObject;
1000 1304
1001 1305
1002/* 1306/*
@@ -1009,7 +1313,7 @@ struct SVGAScreenObject {
1009 * Note the holes in the command ID numbers: These commands have been 1313 * Note the holes in the command ID numbers: These commands have been
1010 * deprecated, and the old IDs must not be reused. 1314 * deprecated, and the old IDs must not be reused.
1011 * 1315 *
1012 * Command IDs from 1000 to 1999 are reserved for use by the SVGA3D 1316 * Command IDs from 1000 to 2999 are reserved for use by the SVGA3D
1013 * protocol. 1317 * protocol.
1014 * 1318 *
1015 * Each command's parameters are described by the comments and 1319 * Each command's parameters are described by the comments and
@@ -1020,6 +1324,7 @@ typedef enum {
1020 SVGA_CMD_INVALID_CMD = 0, 1324 SVGA_CMD_INVALID_CMD = 0,
1021 SVGA_CMD_UPDATE = 1, 1325 SVGA_CMD_UPDATE = 1,
1022 SVGA_CMD_RECT_COPY = 3, 1326 SVGA_CMD_RECT_COPY = 3,
1327 SVGA_CMD_RECT_ROP_COPY = 14,
1023 SVGA_CMD_DEFINE_CURSOR = 19, 1328 SVGA_CMD_DEFINE_CURSOR = 19,
1024 SVGA_CMD_DEFINE_ALPHA_CURSOR = 22, 1329 SVGA_CMD_DEFINE_ALPHA_CURSOR = 22,
1025 SVGA_CMD_UPDATE_VERBOSE = 25, 1330 SVGA_CMD_UPDATE_VERBOSE = 25,
@@ -1035,9 +1340,14 @@ typedef enum {
1035 SVGA_CMD_ANNOTATION_COPY = 40, 1340 SVGA_CMD_ANNOTATION_COPY = 40,
1036 SVGA_CMD_DEFINE_GMR2 = 41, 1341 SVGA_CMD_DEFINE_GMR2 = 41,
1037 SVGA_CMD_REMAP_GMR2 = 42, 1342 SVGA_CMD_REMAP_GMR2 = 42,
1343 SVGA_CMD_DEAD = 43,
1344 SVGA_CMD_DEAD_2 = 44,
1345 SVGA_CMD_NOP = 45,
1346 SVGA_CMD_NOP_ERROR = 46,
1038 SVGA_CMD_MAX 1347 SVGA_CMD_MAX
1039} SVGAFifoCmdId; 1348} SVGAFifoCmdId;
1040 1349
1350#define SVGA_CMD_MAX_DATASIZE (256 * 1024)
1041#define SVGA_CMD_MAX_ARGS 64 1351#define SVGA_CMD_MAX_ARGS 64
1042 1352
1043 1353
@@ -1070,12 +1380,15 @@ typedef enum {
1070 */ 1380 */
1071 1381
1072typedef 1382typedef
1073struct SVGAFifoCmdUpdate { 1383#include "vmware_pack_begin.h"
1384struct {
1074 uint32 x; 1385 uint32 x;
1075 uint32 y; 1386 uint32 y;
1076 uint32 width; 1387 uint32 width;
1077 uint32 height; 1388 uint32 height;
1078} SVGAFifoCmdUpdate; 1389}
1390#include "vmware_pack_end.h"
1391SVGAFifoCmdUpdate;
1079 1392
1080 1393
1081/* 1394/*
@@ -1089,14 +1402,44 @@ struct SVGAFifoCmdUpdate {
1089 */ 1402 */
1090 1403
1091typedef 1404typedef
1092struct SVGAFifoCmdRectCopy { 1405#include "vmware_pack_begin.h"
1406struct {
1407 uint32 srcX;
1408 uint32 srcY;
1409 uint32 destX;
1410 uint32 destY;
1411 uint32 width;
1412 uint32 height;
1413}
1414#include "vmware_pack_end.h"
1415SVGAFifoCmdRectCopy;
1416
1417
1418/*
1419 * SVGA_CMD_RECT_ROP_COPY --
1420 *
1421 * Perform a rectangular DMA transfer from one area of the GFB to
1422 * another, and copy the result to any screens which intersect it.
1423 * The value of ROP may only be SVGA_ROP_COPY, and this command is
1424 * only supported for backwards compatibility reasons.
1425 *
1426 * Availability:
1427 * SVGA_CAP_RECT_COPY
1428 */
1429
1430typedef
1431#include "vmware_pack_begin.h"
1432struct {
1093 uint32 srcX; 1433 uint32 srcX;
1094 uint32 srcY; 1434 uint32 srcY;
1095 uint32 destX; 1435 uint32 destX;
1096 uint32 destY; 1436 uint32 destY;
1097 uint32 width; 1437 uint32 width;
1098 uint32 height; 1438 uint32 height;
1099} SVGAFifoCmdRectCopy; 1439 uint32 rop;
1440}
1441#include "vmware_pack_end.h"
1442SVGAFifoCmdRectRopCopy;
1100 1443
1101 1444
1102/* 1445/*
@@ -1113,7 +1456,8 @@ struct SVGAFifoCmdRectCopy {
1113 */ 1456 */
1114 1457
1115typedef 1458typedef
1116struct SVGAFifoCmdDefineCursor { 1459#include "vmware_pack_begin.h"
1460struct {
1117 uint32 id; /* Reserved, must be zero. */ 1461 uint32 id; /* Reserved, must be zero. */
1118 uint32 hotspotX; 1462 uint32 hotspotX;
1119 uint32 hotspotY; 1463 uint32 hotspotY;
@@ -1125,7 +1469,9 @@ struct SVGAFifoCmdDefineCursor {
1125 * Followed by scanline data for AND mask, then XOR mask. 1469 * Followed by scanline data for AND mask, then XOR mask.
1126 * Each scanline is padded to a 32-bit boundary. 1470 * Each scanline is padded to a 32-bit boundary.
1127 */ 1471 */
1128} SVGAFifoCmdDefineCursor; 1472}
1473#include "vmware_pack_end.h"
1474SVGAFifoCmdDefineCursor;
1129 1475
1130 1476
1131/* 1477/*
@@ -1142,14 +1488,17 @@ struct SVGAFifoCmdDefineCursor {
1142 */ 1488 */
1143 1489
1144typedef 1490typedef
1145struct SVGAFifoCmdDefineAlphaCursor { 1491#include "vmware_pack_begin.h"
1492struct {
1146 uint32 id; /* Reserved, must be zero. */ 1493 uint32 id; /* Reserved, must be zero. */
1147 uint32 hotspotX; 1494 uint32 hotspotX;
1148 uint32 hotspotY; 1495 uint32 hotspotY;
1149 uint32 width; 1496 uint32 width;
1150 uint32 height; 1497 uint32 height;
1151 /* Followed by scanline data */ 1498 /* Followed by scanline data */
1152} SVGAFifoCmdDefineAlphaCursor; 1499}
1500#include "vmware_pack_end.h"
1501SVGAFifoCmdDefineAlphaCursor;
1153 1502
1154 1503
1155/* 1504/*
@@ -1165,13 +1514,16 @@ struct SVGAFifoCmdDefineAlphaCursor {
1165 */ 1514 */
1166 1515
1167typedef 1516typedef
1168struct SVGAFifoCmdUpdateVerbose { 1517#include "vmware_pack_begin.h"
1518struct {
1169 uint32 x; 1519 uint32 x;
1170 uint32 y; 1520 uint32 y;
1171 uint32 width; 1521 uint32 width;
1172 uint32 height; 1522 uint32 height;
1173 uint32 reason; 1523 uint32 reason;
1174} SVGAFifoCmdUpdateVerbose; 1524}
1525#include "vmware_pack_end.h"
1526SVGAFifoCmdUpdateVerbose;
1175 1527
1176 1528
1177/* 1529/*
@@ -1190,14 +1542,17 @@ struct SVGAFifoCmdUpdateVerbose {
1190#define SVGA_ROP_COPY 0x03 1542#define SVGA_ROP_COPY 0x03
1191 1543
1192typedef 1544typedef
1193struct SVGAFifoCmdFrontRopFill { 1545#include "vmware_pack_begin.h"
1546struct {
1194 uint32 color; /* In the same format as the GFB */ 1547 uint32 color; /* In the same format as the GFB */
1195 uint32 x; 1548 uint32 x;
1196 uint32 y; 1549 uint32 y;
1197 uint32 width; 1550 uint32 width;
1198 uint32 height; 1551 uint32 height;
1199 uint32 rop; /* Must be SVGA_ROP_COPY */ 1552 uint32 rop; /* Must be SVGA_ROP_COPY */
1200} SVGAFifoCmdFrontRopFill; 1553}
1554#include "vmware_pack_end.h"
1555SVGAFifoCmdFrontRopFill;
1201 1556
1202 1557
1203/* 1558/*
@@ -1216,9 +1571,12 @@ struct SVGAFifoCmdFrontRopFill {
1216 */ 1571 */
1217 1572
1218typedef 1573typedef
1574#include "vmware_pack_begin.h"
1219struct { 1575struct {
1220 uint32 fence; 1576 uint32 fence;
1221} SVGAFifoCmdFence; 1577}
1578#include "vmware_pack_end.h"
1579SVGAFifoCmdFence;
1222 1580
1223 1581
1224/* 1582/*
@@ -1233,11 +1591,14 @@ struct {
1233 */ 1591 */
1234 1592
1235typedef 1593typedef
1236struct SVGAFifoCmdEscape { 1594#include "vmware_pack_begin.h"
1595struct {
1237 uint32 nsid; 1596 uint32 nsid;
1238 uint32 size; 1597 uint32 size;
1239 /* followed by 'size' bytes of data */ 1598 /* followed by 'size' bytes of data */
1240} SVGAFifoCmdEscape; 1599}
1600#include "vmware_pack_end.h"
1601SVGAFifoCmdEscape;
1241 1602
1242 1603
1243/* 1604/*
@@ -1267,9 +1628,12 @@ struct SVGAFifoCmdEscape {
1267 */ 1628 */
1268 1629
1269typedef 1630typedef
1631#include "vmware_pack_begin.h"
1270struct { 1632struct {
1271 SVGAScreenObject screen; /* Variable-length according to version */ 1633 SVGAScreenObject screen; /* Variable-length according to version */
1272} SVGAFifoCmdDefineScreen; 1634}
1635#include "vmware_pack_end.h"
1636SVGAFifoCmdDefineScreen;
1273 1637
1274 1638
1275/* 1639/*
@@ -1283,9 +1647,12 @@ struct {
1283 */ 1647 */
1284 1648
1285typedef 1649typedef
1650#include "vmware_pack_begin.h"
1286struct { 1651struct {
1287 uint32 screenId; 1652 uint32 screenId;
1288} SVGAFifoCmdDestroyScreen; 1653}
1654#include "vmware_pack_end.h"
1655SVGAFifoCmdDestroyScreen;
1289 1656
1290 1657
1291/* 1658/*
@@ -1336,11 +1703,14 @@ struct {
1336 */ 1703 */
1337 1704
1338typedef 1705typedef
1706#include "vmware_pack_begin.h"
1339struct { 1707struct {
1340 SVGAGuestPtr ptr; 1708 SVGAGuestPtr ptr;
1341 uint32 bytesPerLine; 1709 uint32 bytesPerLine;
1342 SVGAGMRImageFormat format; 1710 SVGAGMRImageFormat format;
1343} SVGAFifoCmdDefineGMRFB; 1711}
1712#include "vmware_pack_end.h"
1713SVGAFifoCmdDefineGMRFB;
1344 1714
1345 1715
1346/* 1716/*
@@ -1348,19 +1718,10 @@ struct {
1348 * 1718 *
1349 * This is a guest-to-host blit. It performs a DMA operation to 1719 * This is a guest-to-host blit. It performs a DMA operation to
1350 * copy a rectangular region of pixels from the current GMRFB to 1720 * copy a rectangular region of pixels from the current GMRFB to
1351 * one or more Screen Objects. 1721 * a ScreenObject.
1352 * 1722 *
1353 * The destination coordinate may be specified relative to a 1723 * The destination coordinate may be specified relative to a
1354 * screen's origin (if a screen ID is specified) or relative to the 1724 * screen's origin. The provided screen ID must be valid.
1355 * virtual coordinate system's origin (if the screen ID is
1356 * SVGA_ID_INVALID). The actual destination may span zero or more
1357 * screens, in the case of a virtual destination rect or a rect
1358 * which extends off the edge of the specified screen.
1359 *
1360 * This command writes to the screen's "base layer": the underlying
1361 * framebuffer which exists below any cursor or video overlays. No
1362 * action is necessary to explicitly hide or update any overlays
1363 * which exist on top of the updated region.
1364 * 1725 *
1365 * The SVGA device is guaranteed to finish reading from the GMRFB 1726 * The SVGA device is guaranteed to finish reading from the GMRFB
1366 * by the time any subsequent FENCE commands are reached. 1727 * by the time any subsequent FENCE commands are reached.
@@ -1373,46 +1734,27 @@ struct {
1373 */ 1734 */
1374 1735
1375typedef 1736typedef
1737#include "vmware_pack_begin.h"
1376struct { 1738struct {
1377 SVGASignedPoint srcOrigin; 1739 SVGASignedPoint srcOrigin;
1378 SVGASignedRect destRect; 1740 SVGASignedRect destRect;
1379 uint32 destScreenId; 1741 uint32 destScreenId;
1380} SVGAFifoCmdBlitGMRFBToScreen; 1742}
1743#include "vmware_pack_end.h"
1744SVGAFifoCmdBlitGMRFBToScreen;
1381 1745
1382 1746
1383/* 1747/*
1384 * SVGA_CMD_BLIT_SCREEN_TO_GMRFB -- 1748 * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
1385 * 1749 *
1386 * This is a host-to-guest blit. It performs a DMA operation to 1750 * This is a host-to-guest blit. It performs a DMA operation to
1387 * copy a rectangular region of pixels from a single Screen Object 1751 * copy a rectangular region of pixels from a single ScreenObject
1388 * back to the current GMRFB. 1752 * back to the current GMRFB.
1389 * 1753 *
1390 * Usage note: This command should be used rarely. It will
1391 * typically be inefficient, but it is necessary for some types of
1392 * synchronization between 3D (GPU) and 2D (CPU) rendering into
1393 * overlapping areas of a screen.
1394 *
1395 * The source coordinate is specified relative to a screen's 1754 * The source coordinate is specified relative to a screen's
1396 * origin. The provided screen ID must be valid. If any parameters 1755 * origin. The provided screen ID must be valid. If any parameters
1397 * are invalid, the resulting pixel values are undefined. 1756 * are invalid, the resulting pixel values are undefined.
1398 * 1757 *
1399 * This command reads the screen's "base layer". Overlays like
1400 * video and cursor are not included, but any data which was sent
1401 * using a blit-to-screen primitive will be available, no matter
1402 * whether the data's original source was the GMRFB or the 3D
1403 * acceleration hardware.
1404 *
1405 * Note that our guest-to-host blits and host-to-guest blits aren't
1406 * symmetric in their current implementation. While the parameters
1407 * are identical, host-to-guest blits are a lot less featureful.
1408 * They do not support clipping: If the source parameters don't
1409 * fully fit within a screen, the blit fails. They must originate
1410 * from exactly one screen. Virtual coordinates are not directly
1411 * supported.
1412 *
1413 * Host-to-guest blits do support the same set of GMRFB formats
1414 * offered by guest-to-host blits.
1415 *
1416 * The SVGA device is guaranteed to finish writing to the GMRFB by 1758 * The SVGA device is guaranteed to finish writing to the GMRFB by
1417 * the time any subsequent FENCE commands are reached. 1759 * the time any subsequent FENCE commands are reached.
1418 * 1760 *
@@ -1421,77 +1763,57 @@ struct {
1421 */ 1763 */
1422 1764
1423typedef 1765typedef
1766#include "vmware_pack_begin.h"
1424struct { 1767struct {
1425 SVGASignedPoint destOrigin; 1768 SVGASignedPoint destOrigin;
1426 SVGASignedRect srcRect; 1769 SVGASignedRect srcRect;
1427 uint32 srcScreenId; 1770 uint32 srcScreenId;
1428} SVGAFifoCmdBlitScreenToGMRFB; 1771}
1772#include "vmware_pack_end.h"
1773SVGAFifoCmdBlitScreenToGMRFB;
1429 1774
1430 1775
1431/* 1776/*
1432 * SVGA_CMD_ANNOTATION_FILL -- 1777 * SVGA_CMD_ANNOTATION_FILL --
1433 * 1778 *
1434 * This is a blit annotation. This command stores a small piece of 1779 * The annotation commands have been deprecated and should not be used
1435 * device state which is consumed by the next blit-to-screen 1780 * by new drivers. They used to provide performance hints to the SVGA
1436 * command. The state is only cleared by commands which are 1781 * device about the content of screen updates, but newer SVGA devices
1437 * specifically documented as consuming an annotation. Other 1782 * ignore these.
1438 * commands (such as ESCAPEs for debugging) may intervene between
1439 * the annotation and its associated blit.
1440 *
1441 * This annotation is a promise about the contents of the next
1442 * blit: The video driver is guaranteeing that all pixels in that
1443 * blit will have the same value, specified here as a color in
1444 * SVGAColorBGRX format.
1445 *
1446 * The SVGA device can still render the blit correctly even if it
1447 * ignores this annotation, but the annotation may allow it to
1448 * perform the blit more efficiently, for example by ignoring the
1449 * source data and performing a fill in hardware.
1450 *
1451 * This annotation is most important for performance when the
1452 * user's display is being remoted over a network connection.
1453 * 1783 *
1454 * Availability: 1784 * Availability:
1455 * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 1785 * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
1456 */ 1786 */
1457 1787
1458typedef 1788typedef
1789#include "vmware_pack_begin.h"
1459struct { 1790struct {
1460 SVGAColorBGRX color; 1791 SVGAColorBGRX color;
1461} SVGAFifoCmdAnnotationFill; 1792}
1793#include "vmware_pack_end.h"
1794SVGAFifoCmdAnnotationFill;
1462 1795
1463 1796
1464/* 1797/*
1465 * SVGA_CMD_ANNOTATION_COPY -- 1798 * SVGA_CMD_ANNOTATION_COPY --
1466 * 1799 *
1467 * This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more 1800 * The annotation commands have been deprecated and should not be used
1468 * information about annotations. 1801 * by new drivers. They used to provide performance hints to the SVGA
1469 * 1802 * device about the content of screen updates, but newer SVGA devices
1470 * This annotation is a promise about the contents of the next 1803 * ignore these.
1471 * blit: The video driver is guaranteeing that all pixels in that
1472 * blit will have the same value as those which already exist at an
1473 * identically-sized region on the same or a different screen.
1474 *
1475 * Note that the source pixels for the COPY in this annotation are
1476 * sampled before applying the annotation's associated blit. They
1477 * are allowed to overlap with the blit's destination pixels.
1478 *
1479 * The copy source rectangle is specified the same way as the blit
1480 * destination: it can be a rectangle which spans zero or more
1481 * screens, specified relative to either a screen or to the virtual
1482 * coordinate system's origin. If the source rectangle includes
1483 * pixels which are not from exactly one screen, the results are
1484 * undefined.
1485 * 1804 *
1486 * Availability: 1805 * Availability:
1487 * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 1806 * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
1488 */ 1807 */
1489 1808
1490typedef 1809typedef
1810#include "vmware_pack_begin.h"
1491struct { 1811struct {
1492 SVGASignedPoint srcOrigin; 1812 SVGASignedPoint srcOrigin;
1493 uint32 srcScreenId; 1813 uint32 srcScreenId;
1494} SVGAFifoCmdAnnotationCopy; 1814}
1815#include "vmware_pack_end.h"
1816SVGAFifoCmdAnnotationCopy;
1495 1817
1496 1818
1497/* 1819/*
@@ -1504,10 +1826,13 @@ struct {
1504 */ 1826 */
1505 1827
1506typedef 1828typedef
1829#include "vmware_pack_begin.h"
1507struct { 1830struct {
1508 uint32 gmrId; 1831 uint32 gmrId;
1509 uint32 numPages; 1832 uint32 numPages;
1510} SVGAFifoCmdDefineGMR2; 1833}
1834#include "vmware_pack_end.h"
1835SVGAFifoCmdDefineGMR2;
1511 1836
1512 1837
1513/* 1838/*
@@ -1546,6 +1871,7 @@ typedef enum {
1546} SVGARemapGMR2Flags; 1871} SVGARemapGMR2Flags;
1547 1872
1548typedef 1873typedef
1874#include "vmware_pack_begin.h"
1549struct { 1875struct {
1550 uint32 gmrId; 1876 uint32 gmrId;
1551 SVGARemapGMR2Flags flags; 1877 SVGARemapGMR2Flags flags;
@@ -1559,6 +1885,52 @@ struct {
1559 * (according to flag SVGA_REMAP_GMR2_PPN64) follows. If flag 1885 * (according to flag SVGA_REMAP_GMR2_PPN64) follows. If flag
1560 * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry. 1886 * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
1561 */ 1887 */
1562} SVGAFifoCmdRemapGMR2; 1888}
1889#include "vmware_pack_end.h"
1890SVGAFifoCmdRemapGMR2;
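
A small sketch (assumption: the command id word precedes the body, and the
PPN array trails it as described in the comment above) of how the total
FIFO footprint of one remap command could be computed:

    static uint32 remap_gmr2_cmd_size(SVGARemapGMR2Flags flags, uint32 numPages)
    {
            /* Entry width depends on SVGA_REMAP_GMR2_PPN64. */
            uint32 entry = (flags & SVGA_REMAP_GMR2_PPN64) ?
                           sizeof(PPN64) : sizeof(PPN);
            /* SINGLE_PPN collapses the array to one repeated entry. */
            uint32 entries = (flags & SVGA_REMAP_GMR2_SINGLE_PPN) ? 1 : numPages;

            return sizeof(uint32) +                /* command id word */
                   sizeof(SVGAFifoCmdRemapGMR2) +  /* fixed body */
                   entries * entry;                /* trailing PPN array */
    }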
1891
1892
1893/*
1894 * Size of SVGA device memory such as frame buffer and FIFO.
1895 */
1896#define SVGA_VRAM_MIN_SIZE (4 * 640 * 480) /* bytes */
1897#define SVGA_VRAM_MIN_SIZE_3D (16 * 1024 * 1024)
1898#define SVGA_VRAM_MAX_SIZE (128 * 1024 * 1024)
1899#define SVGA_MEMORY_SIZE_MAX (1024 * 1024 * 1024)
1900#define SVGA_FIFO_SIZE_MAX (2 * 1024 * 1024)
1901#define SVGA_GRAPHICS_MEMORY_KB_MIN (32 * 1024)
1902#define SVGA_GRAPHICS_MEMORY_KB_MAX (2 * 1024 * 1024)
1903#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT (256 * 1024)
1904
1905#define SVGA_VRAM_SIZE_W2K (64 * 1024 * 1024) /* 64 MB */
1906
1907/*
1908 * To simplify autoDetect display configuration, support a minimum of
1909 * two 1920x1200 monitors, 32bpp, side-by-side, optionally rotated:
1910 * numDisplays = 2
1911 * maxWidth = numDisplays * 1920 = 3840
1912 * maxHeight = rotated width of single monitor = 1920
1913 * vramSize = maxWidth * maxHeight * 4 = 29491200
1914 */
1915#define SVGA_VRAM_SIZE_AUTODETECT (32 * 1024 * 1024)
1916
1917#if defined(VMX86_SERVER)
1918#define SVGA_VRAM_SIZE (4 * 1024 * 1024)
1919#define SVGA_VRAM_SIZE_3D (64 * 1024 * 1024)
1920#define SVGA_FIFO_SIZE (256 * 1024)
1921#define SVGA_FIFO_SIZE_3D (516 * 1024)
1922#define SVGA_MEMORY_SIZE_DEFAULT (160 * 1024 * 1024)
1923#define SVGA_AUTODETECT_DEFAULT FALSE
1924#else
1925#define SVGA_VRAM_SIZE (16 * 1024 * 1024)
1926#define SVGA_VRAM_SIZE_3D SVGA_VRAM_MAX_SIZE
1927#define SVGA_FIFO_SIZE (2 * 1024 * 1024)
1928#define SVGA_FIFO_SIZE_3D SVGA_FIFO_SIZE
1929#define SVGA_MEMORY_SIZE_DEFAULT (768 * 1024 * 1024)
1930#define SVGA_AUTODETECT_DEFAULT TRUE
1931#endif
1932
1933#define SVGA_FIFO_SIZE_GBOBJECTS (256 * 1024)
1934#define SVGA_VRAM_SIZE_GBOBJECTS (4 * 1024 * 1024)
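
The arithmetic in the autoDetect comment above can be spot-checked at
compile time; this assertion is illustrative only, not part of the header:

    _Static_assert(2 * 1920 * 1920 * 4 <= SVGA_VRAM_SIZE_AUTODETECT,
                   "32 MB must hold two rotated 1920x1200 heads at 32bpp");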
1563 1935
1564#endif 1936#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
new file mode 100644
index 000000000000..2e8ba4df8de9
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
@@ -0,0 +1,46 @@
1/**********************************************************
2 * Copyright 2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25#ifndef _VM_BASIC_TYPES_H_
26#define _VM_BASIC_TYPES_H_
27#include <linux/kernel.h>
28
29typedef u32 uint32;
30typedef s32 int32;
31typedef u64 uint64;
32typedef u16 uint16;
33typedef s16 int16;
34typedef u8 uint8;
35typedef s8 int8;
36
37typedef uint64 PA;
38typedef uint32 PPN;
39typedef uint64 PPN64;
40
41typedef bool Bool;
42
43#define MAX_UINT32 U32_MAX
44#define MAX_UINT16 U16_MAX
45
46#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
new file mode 100644
index 000000000000..120eab830eaf
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
@@ -0,0 +1,21 @@
1#ifndef _VM_BASIC_TYPES_H_
2#define _VM_BASIC_TYPES_H_
3#include <linux/kernel.h>
4
5typedef u32 uint32;
6typedef s32 int32;
7typedef u64 uint64;
8typedef u16 uint16;
9typedef s16 int16;
10typedef u8 uint8;
11typedef s8 int8;
12
13typedef uint64 PA;
14typedef uint32 PPN;
15typedef uint64 PPN64;
16
17typedef bool Bool;
18
19#define MAX_UINT32 U32_MAX
20
21#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
new file mode 100644
index 000000000000..7e7b0ce34aa2
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
@@ -0,0 +1,25 @@
1/**********************************************************
2 * Copyright 2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25#include <linux/compiler.h>
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
new file mode 100644
index 000000000000..e2e440ed3d44
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
@@ -0,0 +1,25 @@
1/**********************************************************
2 * Copyright 2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25__packed
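
Taken together, the two wrappers turn a bracketed struct definition into a
GCC-packed typedef. For example, after preprocessing, a definition written
as

    typedef
    #include "vmware_pack_begin.h"
    struct {
            uint32 gmrId;
            uint32 numPages;
    }
    #include "vmware_pack_end.h"
    SVGAFifoCmdDefineGMR2;

reduces to

    typedef struct {
            uint32 gmrId;
            uint32 numPages;
    } __packed SVGAFifoCmdDefineGMR2;

since vmware_pack_begin.h only pulls in <linux/compiler.h> and
vmware_pack_end.h expands to the __packed attribute between the closing
brace and the typedef name.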
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
deleted file mode 100644
index f58dc7dd15c5..000000000000
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ /dev/null
@@ -1,2627 +0,0 @@
1/**********************************************************
2 * Copyright 1998-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_reg.h --
28 *
29 * SVGA 3D hardware definitions
30 */
31
32#ifndef _SVGA3D_REG_H_
33#define _SVGA3D_REG_H_
34
35#include "svga_reg.h"
36
37typedef uint32 PPN;
38typedef __le64 PPN64;
39
40/*
41 * 3D Hardware Version
42 *
43 * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
44 * register. Is set by the host and read by the guest. This lets
45 * us make new guest drivers which are backwards-compatible with old
46 * SVGA hardware revisions. It does not let us support old guest
47 * drivers. Good enough for now.
48 *
49 */
50
51#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
52#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
53#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
54
55typedef enum {
56 SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
57 SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
58 SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
59 SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
60 SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
61 SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
62 SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1),
63 SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1,
64} SVGA3dHardwareVersion;
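
A quick sketch of the version math above: major and minor pack into the
high and low halves of one uint32 (the assert()s are illustrative only):

    #include <assert.h>

    static void hwversion_example(void)
    {
            uint32 v = SVGA3D_MAKE_HWVERSION(2, 1);  /* SVGA3D_HWVERSION_WS8_B1 */

            assert(v == 0x20001);                    /* (2 << 16) | 1 */
            assert(SVGA3D_MAJOR_HWVERSION(v) == 2);
            assert(SVGA3D_MINOR_HWVERSION(v) == 1);
    }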
65
66/*
67 * Generic Types
68 */
69
70typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
71#define SVGA3D_NUM_CLIPPLANES 6
72#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8
73#define SVGA3D_MAX_CONTEXT_IDS 256
74#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
75
76#define SVGA3D_NUM_TEXTURE_UNITS 32
77#define SVGA3D_NUM_LIGHTS 8
78
79/*
80 * Surface formats.
81 *
82 * If you modify this list, be sure to keep GLUtil.c in sync. It
83 * includes the internal format definition of each surface in
84 * GLUtil_ConvertSurfaceFormat, and it contains a table of
85 * human-readable names in GLUtil_GetFormatName.
86 */
87
88typedef enum SVGA3dSurfaceFormat {
89 SVGA3D_FORMAT_MIN = 0,
90 SVGA3D_FORMAT_INVALID = 0,
91
92 SVGA3D_X8R8G8B8 = 1,
93 SVGA3D_A8R8G8B8 = 2,
94
95 SVGA3D_R5G6B5 = 3,
96 SVGA3D_X1R5G5B5 = 4,
97 SVGA3D_A1R5G5B5 = 5,
98 SVGA3D_A4R4G4B4 = 6,
99
100 SVGA3D_Z_D32 = 7,
101 SVGA3D_Z_D16 = 8,
102 SVGA3D_Z_D24S8 = 9,
103 SVGA3D_Z_D15S1 = 10,
104
105 SVGA3D_LUMINANCE8 = 11,
106 SVGA3D_LUMINANCE4_ALPHA4 = 12,
107 SVGA3D_LUMINANCE16 = 13,
108 SVGA3D_LUMINANCE8_ALPHA8 = 14,
109
110 SVGA3D_DXT1 = 15,
111 SVGA3D_DXT2 = 16,
112 SVGA3D_DXT3 = 17,
113 SVGA3D_DXT4 = 18,
114 SVGA3D_DXT5 = 19,
115
116 SVGA3D_BUMPU8V8 = 20,
117 SVGA3D_BUMPL6V5U5 = 21,
118 SVGA3D_BUMPX8L8V8U8 = 22,
119 SVGA3D_BUMPL8V8U8 = 23,
120
121 SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
122 SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
123
124 SVGA3D_A2R10G10B10 = 26,
125
126 /* signed formats */
127 SVGA3D_V8U8 = 27,
128 SVGA3D_Q8W8V8U8 = 28,
129 SVGA3D_CxV8U8 = 29,
130
131 /* mixed formats */
132 SVGA3D_X8L8V8U8 = 30,
133 SVGA3D_A2W10V10U10 = 31,
134
135 SVGA3D_ALPHA8 = 32,
136
137 /* Single- and dual-component floating point formats */
138 SVGA3D_R_S10E5 = 33,
139 SVGA3D_R_S23E8 = 34,
140 SVGA3D_RG_S10E5 = 35,
141 SVGA3D_RG_S23E8 = 36,
142
143 SVGA3D_BUFFER = 37,
144
145 SVGA3D_Z_D24X8 = 38,
146
147 SVGA3D_V16U16 = 39,
148
149 SVGA3D_G16R16 = 40,
150 SVGA3D_A16B16G16R16 = 41,
151
152 /* Packed Video formats */
153 SVGA3D_UYVY = 42,
154 SVGA3D_YUY2 = 43,
155
156 /* Planar video formats */
157 SVGA3D_NV12 = 44,
158
159 /* Video format with alpha */
160 SVGA3D_AYUV = 45,
161
162 SVGA3D_R32G32B32A32_TYPELESS = 46,
163 SVGA3D_R32G32B32A32_FLOAT = 25,
164 SVGA3D_R32G32B32A32_UINT = 47,
165 SVGA3D_R32G32B32A32_SINT = 48,
166 SVGA3D_R32G32B32_TYPELESS = 49,
167 SVGA3D_R32G32B32_FLOAT = 50,
168 SVGA3D_R32G32B32_UINT = 51,
169 SVGA3D_R32G32B32_SINT = 52,
170 SVGA3D_R16G16B16A16_TYPELESS = 53,
171 SVGA3D_R16G16B16A16_FLOAT = 24,
172 SVGA3D_R16G16B16A16_UNORM = 41,
173 SVGA3D_R16G16B16A16_UINT = 54,
174 SVGA3D_R16G16B16A16_SNORM = 55,
175 SVGA3D_R16G16B16A16_SINT = 56,
176 SVGA3D_R32G32_TYPELESS = 57,
177 SVGA3D_R32G32_FLOAT = 36,
178 SVGA3D_R32G32_UINT = 58,
179 SVGA3D_R32G32_SINT = 59,
180 SVGA3D_R32G8X24_TYPELESS = 60,
181 SVGA3D_D32_FLOAT_S8X24_UINT = 61,
182 SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
183 SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
184 SVGA3D_R10G10B10A2_TYPELESS = 64,
185 SVGA3D_R10G10B10A2_UNORM = 26,
186 SVGA3D_R10G10B10A2_UINT = 65,
187 SVGA3D_R11G11B10_FLOAT = 66,
188 SVGA3D_R8G8B8A8_TYPELESS = 67,
189 SVGA3D_R8G8B8A8_UNORM = 68,
190 SVGA3D_R8G8B8A8_UNORM_SRGB = 69,
191 SVGA3D_R8G8B8A8_UINT = 70,
192 SVGA3D_R8G8B8A8_SNORM = 28,
193 SVGA3D_R8G8B8A8_SINT = 71,
194 SVGA3D_R16G16_TYPELESS = 72,
195 SVGA3D_R16G16_FLOAT = 35,
196 SVGA3D_R16G16_UNORM = 40,
197 SVGA3D_R16G16_UINT = 73,
198 SVGA3D_R16G16_SNORM = 39,
199 SVGA3D_R16G16_SINT = 74,
200 SVGA3D_R32_TYPELESS = 75,
201 SVGA3D_D32_FLOAT = 76,
202 SVGA3D_R32_FLOAT = 34,
203 SVGA3D_R32_UINT = 77,
204 SVGA3D_R32_SINT = 78,
205 SVGA3D_R24G8_TYPELESS = 79,
206 SVGA3D_D24_UNORM_S8_UINT = 80,
207 SVGA3D_R24_UNORM_X8_TYPELESS = 81,
208 SVGA3D_X24_TYPELESS_G8_UINT = 82,
209 SVGA3D_R8G8_TYPELESS = 83,
210 SVGA3D_R8G8_UNORM = 84,
211 SVGA3D_R8G8_UINT = 85,
212 SVGA3D_R8G8_SNORM = 27,
213 SVGA3D_R8G8_SINT = 86,
214 SVGA3D_R16_TYPELESS = 87,
215 SVGA3D_R16_FLOAT = 33,
216 SVGA3D_D16_UNORM = 8,
217 SVGA3D_R16_UNORM = 88,
218 SVGA3D_R16_UINT = 89,
219 SVGA3D_R16_SNORM = 90,
220 SVGA3D_R16_SINT = 91,
221 SVGA3D_R8_TYPELESS = 92,
222 SVGA3D_R8_UNORM = 93,
223 SVGA3D_R8_UINT = 94,
224 SVGA3D_R8_SNORM = 95,
225 SVGA3D_R8_SINT = 96,
226 SVGA3D_A8_UNORM = 32,
227 SVGA3D_R1_UNORM = 97,
228 SVGA3D_R9G9B9E5_SHAREDEXP = 98,
229 SVGA3D_R8G8_B8G8_UNORM = 99,
230 SVGA3D_G8R8_G8B8_UNORM = 100,
231 SVGA3D_BC1_TYPELESS = 101,
232 SVGA3D_BC1_UNORM = 15,
233 SVGA3D_BC1_UNORM_SRGB = 102,
234 SVGA3D_BC2_TYPELESS = 103,
235 SVGA3D_BC2_UNORM = 17,
236 SVGA3D_BC2_UNORM_SRGB = 104,
237 SVGA3D_BC3_TYPELESS = 105,
238 SVGA3D_BC3_UNORM = 19,
239 SVGA3D_BC3_UNORM_SRGB = 106,
240 SVGA3D_BC4_TYPELESS = 107,
241 SVGA3D_BC4_UNORM = 108,
242 SVGA3D_BC4_SNORM = 109,
243 SVGA3D_BC5_TYPELESS = 110,
244 SVGA3D_BC5_UNORM = 111,
245 SVGA3D_BC5_SNORM = 112,
246 SVGA3D_B5G6R5_UNORM = 3,
247 SVGA3D_B5G5R5A1_UNORM = 5,
248 SVGA3D_B8G8R8A8_UNORM = 2,
249 SVGA3D_B8G8R8X8_UNORM = 1,
250 SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113,
251 SVGA3D_B8G8R8A8_TYPELESS = 114,
252 SVGA3D_B8G8R8A8_UNORM_SRGB = 115,
253 SVGA3D_B8G8R8X8_TYPELESS = 116,
254 SVGA3D_B8G8R8X8_UNORM_SRGB = 117,
255
256 /* Advanced D3D9 depth formats. */
257 SVGA3D_Z_DF16 = 118,
258 SVGA3D_Z_DF24 = 119,
259 SVGA3D_Z_D24S8_INT = 120,
260
261 /* Planar video formats. */
262 SVGA3D_YV12 = 121,
263
264 SVGA3D_FORMAT_MAX = 122,
265} SVGA3dSurfaceFormat;
266
267typedef uint32 SVGA3dColor; /* a, r, g, b */
268
269/*
270 * These match the D3DFORMAT_OP definitions used by Direct3D. We need
271 * them so that we can query the host for what the supported surface
272 * operations are (when we're using the D3D backend, in particular),
273 * and so we can send those operations to the guest.
274 */
275typedef enum {
276 SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
277 SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
278 SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
279 SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
280 SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
281 SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
282 SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
283
284/*
285 * This format can be used as a render target if the current display mode
286 * is of the same depth when the alpha channel is ignored. E.g. if the device
287 * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
288 * format op list entry for A8R8G8B8 should have this cap.
289 */
290 SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
291
292/*
293 * This format contains DirectDraw support (including Flip). This flag
294 * should not be set on alpha formats.
295 */
296 SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
297
298/*
299 * The rasterizer can provide some level of Direct3D support for this format,
300 * which implies that the driver can create a Context in this mode (for some
301 * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
302 * flag must also be set.
303 */
304 SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
305
306/*
307 * This is set for a private format when the driver has put the bpp in
308 * the structure.
309 */
310 SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
311
312/*
313 * Indicates that this format can be converted to any RGB format for which
314 * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
315 */
316 SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
317
318/*
319 * Indicates that this format can be used to create offscreen plain surfaces.
320 */
321 SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
322
323/*
324 * Indicates that this format can be read as an SRGB texture (meaning that the
325 * sampler will linearize the looked-up data)
326 */
327 SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
328
329/*
330 * Indicates that this format can be used in the bumpmap instructions
331 */
332 SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
333
334/*
335 * Indicates that this format can be sampled by the displacement map sampler
336 */
337 SVGA3DFORMAT_OP_DMAP = 0x00020000,
338
339/*
340 * Indicates that this format cannot be used with texture filtering
341 */
342 SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
343
344/*
345 * Indicates that format conversions are supported to this RGB format if
346 * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
347 */
348 SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
349
350/*
351 * Indicates that this format can be written as an SRGB target (meaning that the
352 * pixel pipe will de-linearize data on output to the format)
353 */
354 SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
355
356/*
357 * Indicates that this format cannot be used with alpha blending
358 */
359 SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
360
361/*
362 * Indicates that the device can auto-generate sublevels for resources
363 * of this format
364 */
365 SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
366
367/*
368 * Indicates that this format can be used by vertex texture sampler
369 */
370 SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
371
372/*
373 * Indicates that this format supports neither texture coordinate wrap
374 * modes, nor mipmapping
375 */
376 SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
377} SVGA3dFormatOp;
378
379/*
380 * This structure is a conversion of SVGA3DFORMAT_OP_*.
381 * Entries must be located at the same position.
382 */
383typedef union {
384 uint32 value;
385 struct {
386 uint32 texture : 1;
387 uint32 volumeTexture : 1;
388 uint32 cubeTexture : 1;
389 uint32 offscreenRenderTarget : 1;
390 uint32 sameFormatRenderTarget : 1;
391 uint32 unknown1 : 1;
392 uint32 zStencil : 1;
393 uint32 zStencilArbitraryDepth : 1;
394 uint32 sameFormatUpToAlpha : 1;
395 uint32 unknown2 : 1;
396 uint32 displayMode : 1;
397 uint32 acceleration3d : 1;
398 uint32 pixelSize : 1;
399 uint32 convertToARGB : 1;
400 uint32 offscreenPlain : 1;
401 uint32 sRGBRead : 1;
402 uint32 bumpMap : 1;
403 uint32 dmap : 1;
404 uint32 noFilter : 1;
405 uint32 memberOfGroupARGB : 1;
406 uint32 sRGBWrite : 1;
407 uint32 noAlphaBlend : 1;
408 uint32 autoGenMipMap : 1;
409 uint32 vertexTexture : 1;
410 uint32 noTexCoordWrapNorMip : 1;
411 };
412} SVGA3dSurfaceFormatCaps;
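
A sketch of the union in use (assuming the usual x86 bitfield layout these
device headers rely on): loading a raw SVGA3DFORMAT_OP_* mask into .value
makes the named bits directly readable. Note the unknown1/unknown2 padding
bits, which are why SVGA3DFORMAT_OP_ZSTENCIL is 0x40 rather than 0x20:

    static Bool format_supports_zstencil(uint32 rawCaps)
    {
            SVGA3dSurfaceFormatCaps caps;

            caps.value = rawCaps;
            return caps.zStencil;  /* same bit as SVGA3DFORMAT_OP_ZSTENCIL */
    }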
413
414/*
415 * SVGA_3D_CMD_SETRENDERSTATE Types. All value types
416 * must fit in a uint32.
417 */
418
419typedef enum {
420 SVGA3D_RS_INVALID = 0,
421 SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
422 SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
423 SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
424 SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
425 SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
426 SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
427 SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
428 SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
429 SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
430 SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
431 SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
432 SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
433 SVGA3D_RS_STENCILREF = 13, /* uint32 */
434 SVGA3D_RS_STENCILMASK = 14, /* uint32 */
435 SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
436 SVGA3D_RS_FOGSTART = 16, /* float */
437 SVGA3D_RS_FOGEND = 17, /* float */
438 SVGA3D_RS_FOGDENSITY = 18, /* float */
439 SVGA3D_RS_POINTSIZE = 19, /* float */
440 SVGA3D_RS_POINTSIZEMIN = 20, /* float */
441 SVGA3D_RS_POINTSIZEMAX = 21, /* float */
442 SVGA3D_RS_POINTSCALE_A = 22, /* float */
443 SVGA3D_RS_POINTSCALE_B = 23, /* float */
444 SVGA3D_RS_POINTSCALE_C = 24, /* float */
445 SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
446 SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
447 SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
448 SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
449 SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
450 SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
451 SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
452 SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
453 SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
454 SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
455 SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
456 SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
457 SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
458 SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
459 SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
460 SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
461 SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
462 SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
463 SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
464 SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
465 SVGA3D_RS_ZBIAS = 45, /* float */
466 SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
467 SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
468 SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
469 SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
470 SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
471 SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
472 SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
473 SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
474 SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
475 SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
476 SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
477 SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
478 SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
479 SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
480 SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
481 SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
482 SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
483 SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
484 SVGA3D_RS_DEPTHBIAS = 64, /* float */
485
486
487 /*
488 * Output Gamma Level
489 *
490 * Output gamma affects the gamma curve of colors that are output from the
491 * rendering pipeline. A value of 1.0 specifies a linear color space. If the
492 * value is <= 0.0, gamma correction is ignored and linear color space is
493 * used.
494 */
495
496 SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
497 SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
498 SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
499 SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
500 SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
501 SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
502 SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
503 SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
504 SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
505 SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
506 SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
507 SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
508 SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
509 SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
510 SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
511 SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
512 SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
513 SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
514 SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
515 SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
516 SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
517 SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
518 SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
519 SVGA3D_RS_TWEENFACTOR = 88, /* float */
520 SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
521 SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
522 SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
523 SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
524 SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
525 SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
526 SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
527 SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
528 SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */
529 SVGA3D_RS_LINEAA = 98, /* SVGA3dBool */
530 SVGA3D_RS_LINEWIDTH = 99, /* float */
531 SVGA3D_RS_MAX
532} SVGA3dRenderStateName;
533
534typedef enum {
535 SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0,
536 SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1,
537 SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2,
538 SVGA3D_TRANSPARENCYANTIALIAS_MAX
539} SVGA3dTransparencyAntialiasType;
540
541typedef enum {
542 SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
543 SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
544 SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
545} SVGA3dVertexMaterial;
546
547typedef enum {
548 SVGA3D_FILLMODE_INVALID = 0,
549 SVGA3D_FILLMODE_POINT = 1,
550 SVGA3D_FILLMODE_LINE = 2,
551 SVGA3D_FILLMODE_FILL = 3,
552 SVGA3D_FILLMODE_MAX
553} SVGA3dFillModeType;
554
555
556typedef
557union {
558 struct {
559 uint16 mode; /* SVGA3dFillModeType */
560 uint16 face; /* SVGA3dFace */
561 };
562 uint32 uintValue;
563} SVGA3dFillMode;
564
565typedef enum {
566 SVGA3D_SHADEMODE_INVALID = 0,
567 SVGA3D_SHADEMODE_FLAT = 1,
568 SVGA3D_SHADEMODE_SMOOTH = 2,
569 SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
570 SVGA3D_SHADEMODE_MAX
571} SVGA3dShadeMode;
572
573typedef
574union {
575 struct {
576 uint16 repeat;
577 uint16 pattern;
578 };
579 uint32 uintValue;
580} SVGA3dLinePattern;
581
582typedef enum {
583 SVGA3D_BLENDOP_INVALID = 0,
584 SVGA3D_BLENDOP_ZERO = 1,
585 SVGA3D_BLENDOP_ONE = 2,
586 SVGA3D_BLENDOP_SRCCOLOR = 3,
587 SVGA3D_BLENDOP_INVSRCCOLOR = 4,
588 SVGA3D_BLENDOP_SRCALPHA = 5,
589 SVGA3D_BLENDOP_INVSRCALPHA = 6,
590 SVGA3D_BLENDOP_DESTALPHA = 7,
591 SVGA3D_BLENDOP_INVDESTALPHA = 8,
592 SVGA3D_BLENDOP_DESTCOLOR = 9,
593 SVGA3D_BLENDOP_INVDESTCOLOR = 10,
594 SVGA3D_BLENDOP_SRCALPHASAT = 11,
595 SVGA3D_BLENDOP_BLENDFACTOR = 12,
596 SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
597 SVGA3D_BLENDOP_MAX
598} SVGA3dBlendOp;
599
600typedef enum {
601 SVGA3D_BLENDEQ_INVALID = 0,
602 SVGA3D_BLENDEQ_ADD = 1,
603 SVGA3D_BLENDEQ_SUBTRACT = 2,
604 SVGA3D_BLENDEQ_REVSUBTRACT = 3,
605 SVGA3D_BLENDEQ_MINIMUM = 4,
606 SVGA3D_BLENDEQ_MAXIMUM = 5,
607 SVGA3D_BLENDEQ_MAX
608} SVGA3dBlendEquation;
609
610typedef enum {
611 SVGA3D_FRONTWINDING_INVALID = 0,
612 SVGA3D_FRONTWINDING_CW = 1,
613 SVGA3D_FRONTWINDING_CCW = 2,
614 SVGA3D_FRONTWINDING_MAX
615} SVGA3dFrontWinding;
616
617typedef enum {
618 SVGA3D_FACE_INVALID = 0,
619 SVGA3D_FACE_NONE = 1,
620 SVGA3D_FACE_FRONT = 2,
621 SVGA3D_FACE_BACK = 3,
622 SVGA3D_FACE_FRONT_BACK = 4,
623 SVGA3D_FACE_MAX
624} SVGA3dFace;
625
626/*
627 * The order and the values should not be changed
628 */
629
630typedef enum {
631 SVGA3D_CMP_INVALID = 0,
632 SVGA3D_CMP_NEVER = 1,
633 SVGA3D_CMP_LESS = 2,
634 SVGA3D_CMP_EQUAL = 3,
635 SVGA3D_CMP_LESSEQUAL = 4,
636 SVGA3D_CMP_GREATER = 5,
637 SVGA3D_CMP_NOTEQUAL = 6,
638 SVGA3D_CMP_GREATEREQUAL = 7,
639 SVGA3D_CMP_ALWAYS = 8,
640 SVGA3D_CMP_MAX
641} SVGA3dCmpFunc;
642
643/*
644 * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
645 * the fog factor to be specified in the alpha component of the specular
646 * (a.k.a. secondary) vertex color.
647 */
648typedef enum {
649 SVGA3D_FOGFUNC_INVALID = 0,
650 SVGA3D_FOGFUNC_EXP = 1,
651 SVGA3D_FOGFUNC_EXP2 = 2,
652 SVGA3D_FOGFUNC_LINEAR = 3,
653 SVGA3D_FOGFUNC_PER_VERTEX = 4
654} SVGA3dFogFunction;
655
656/*
657 * SVGA3D_FOGTYPE_* specifies whether fog factors are computed on a per-vertex
658 * or per-pixel basis.
659 */
660typedef enum {
661 SVGA3D_FOGTYPE_INVALID = 0,
662 SVGA3D_FOGTYPE_VERTEX = 1,
663 SVGA3D_FOGTYPE_PIXEL = 2,
664 SVGA3D_FOGTYPE_MAX = 3
665} SVGA3dFogType;
666
667/*
668 * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
669 * computed using the eye Z value of each pixel (or vertex), whereas range-
670 * based fog is computed using the actual distance (range) to the eye.
671 */
672typedef enum {
673 SVGA3D_FOGBASE_INVALID = 0,
674 SVGA3D_FOGBASE_DEPTHBASED = 1,
675 SVGA3D_FOGBASE_RANGEBASED = 2,
676 SVGA3D_FOGBASE_MAX = 3
677} SVGA3dFogBase;
678
679typedef enum {
680 SVGA3D_STENCILOP_INVALID = 0,
681 SVGA3D_STENCILOP_KEEP = 1,
682 SVGA3D_STENCILOP_ZERO = 2,
683 SVGA3D_STENCILOP_REPLACE = 3,
684 SVGA3D_STENCILOP_INCRSAT = 4,
685 SVGA3D_STENCILOP_DECRSAT = 5,
686 SVGA3D_STENCILOP_INVERT = 6,
687 SVGA3D_STENCILOP_INCR = 7,
688 SVGA3D_STENCILOP_DECR = 8,
689 SVGA3D_STENCILOP_MAX
690} SVGA3dStencilOp;
691
692typedef enum {
693 SVGA3D_CLIPPLANE_0 = (1 << 0),
694 SVGA3D_CLIPPLANE_1 = (1 << 1),
695 SVGA3D_CLIPPLANE_2 = (1 << 2),
696 SVGA3D_CLIPPLANE_3 = (1 << 3),
697 SVGA3D_CLIPPLANE_4 = (1 << 4),
698 SVGA3D_CLIPPLANE_5 = (1 << 5),
699} SVGA3dClipPlanes;
700
701typedef enum {
702 SVGA3D_CLEAR_COLOR = 0x1,
703 SVGA3D_CLEAR_DEPTH = 0x2,
704 SVGA3D_CLEAR_STENCIL = 0x4
705} SVGA3dClearFlag;
706
707typedef enum {
708 SVGA3D_RT_DEPTH = 0,
709 SVGA3D_RT_STENCIL = 1,
710 SVGA3D_RT_COLOR0 = 2,
711 SVGA3D_RT_COLOR1 = 3,
712 SVGA3D_RT_COLOR2 = 4,
713 SVGA3D_RT_COLOR3 = 5,
714 SVGA3D_RT_COLOR4 = 6,
715 SVGA3D_RT_COLOR5 = 7,
716 SVGA3D_RT_COLOR6 = 8,
717 SVGA3D_RT_COLOR7 = 9,
718 SVGA3D_RT_MAX,
719 SVGA3D_RT_INVALID = ((uint32)-1),
720} SVGA3dRenderTargetType;
721
722#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
723
724typedef
725union {
726 struct {
727 uint32 red : 1;
728 uint32 green : 1;
729 uint32 blue : 1;
730 uint32 alpha : 1;
731 };
732 uint32 uintValue;
733} SVGA3dColorMask;
734
735typedef enum {
736 SVGA3D_VBLEND_DISABLE = 0,
737 SVGA3D_VBLEND_1WEIGHT = 1,
738 SVGA3D_VBLEND_2WEIGHT = 2,
739 SVGA3D_VBLEND_3WEIGHT = 3,
740} SVGA3dVertexBlendFlags;
741
742typedef enum {
743 SVGA3D_WRAPCOORD_0 = 1 << 0,
744 SVGA3D_WRAPCOORD_1 = 1 << 1,
745 SVGA3D_WRAPCOORD_2 = 1 << 2,
746 SVGA3D_WRAPCOORD_3 = 1 << 3,
747 SVGA3D_WRAPCOORD_ALL = 0xF,
748} SVGA3dWrapFlags;
749
750/*
751 * SVGA_3D_CMD_TEXTURESTATE Types. All value types
752 * must fit in a uint32.
753 */
754
755typedef enum {
756 SVGA3D_TS_INVALID = 0,
757 SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
758 SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
759 SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
760 SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
761 SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
762 SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
763 SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
764 SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
765 SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
766 SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
767 SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
768 SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
769 SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
770 SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
771 SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
772 SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
773 SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
774 SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
775 SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
776 SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
777 SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
778 SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
779 SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
780 SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
781
782
783 /*
784 * Sampler Gamma Level
785 *
786 * Sampler gamma affects the color of samples taken from the sampler. A
787 * value of 1.0 will produce linear samples. If the value is <= 0.0 the
788 * gamma value is ignored and a linear space is used.
789 */
790
791 SVGA3D_TS_GAMMA = 25, /* float */
792 SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
793 SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
794 SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
795 SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
796 SVGA3D_TS_MAX
797} SVGA3dTextureStateName;
798
799typedef enum {
800 SVGA3D_TC_INVALID = 0,
801 SVGA3D_TC_DISABLE = 1,
802 SVGA3D_TC_SELECTARG1 = 2,
803 SVGA3D_TC_SELECTARG2 = 3,
804 SVGA3D_TC_MODULATE = 4,
805 SVGA3D_TC_ADD = 5,
806 SVGA3D_TC_ADDSIGNED = 6,
807 SVGA3D_TC_SUBTRACT = 7,
808 SVGA3D_TC_BLENDTEXTUREALPHA = 8,
809 SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
810 SVGA3D_TC_BLENDCURRENTALPHA = 10,
811 SVGA3D_TC_BLENDFACTORALPHA = 11,
812 SVGA3D_TC_MODULATE2X = 12,
813 SVGA3D_TC_MODULATE4X = 13,
814 SVGA3D_TC_DSDT = 14,
815 SVGA3D_TC_DOTPRODUCT3 = 15,
816 SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
817 SVGA3D_TC_ADDSIGNED2X = 17,
818 SVGA3D_TC_ADDSMOOTH = 18,
819 SVGA3D_TC_PREMODULATE = 19,
820 SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
821 SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
822 SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
823 SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
824 SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
825 SVGA3D_TC_MULTIPLYADD = 25,
826 SVGA3D_TC_LERP = 26,
827 SVGA3D_TC_MAX
828} SVGA3dTextureCombiner;
829
830#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
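
Worked example of the capability-bit mapping (illustrative asserts only):
each combiner op shifts down by one so that SVGA3D_TC_INVALID consumes no
bit.

    _Static_assert(SVGA3D_TC_CAP_BIT(SVGA3D_TC_INVALID) == 0,
                   "invalid op maps to no capability bit");
    _Static_assert(SVGA3D_TC_CAP_BIT(SVGA3D_TC_MODULATE) == 0x8,
                   "op 4 maps to bit 3");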
831
832typedef enum {
833 SVGA3D_TEX_ADDRESS_INVALID = 0,
834 SVGA3D_TEX_ADDRESS_WRAP = 1,
835 SVGA3D_TEX_ADDRESS_MIRROR = 2,
836 SVGA3D_TEX_ADDRESS_CLAMP = 3,
837 SVGA3D_TEX_ADDRESS_BORDER = 4,
838 SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
839 SVGA3D_TEX_ADDRESS_EDGE = 6,
840 SVGA3D_TEX_ADDRESS_MAX
841} SVGA3dTextureAddress;
842
843/*
844 * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
845 * disabled, and the rasterizer should use the magnification filter instead.
846 */
847typedef enum {
848 SVGA3D_TEX_FILTER_NONE = 0,
849 SVGA3D_TEX_FILTER_NEAREST = 1,
850 SVGA3D_TEX_FILTER_LINEAR = 2,
851 SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
852 SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */
853 SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */
854 SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */
855 SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */
856 SVGA3D_TEX_FILTER_MAX
857} SVGA3dTextureFilter;
858
859typedef enum {
860 SVGA3D_TEX_TRANSFORM_OFF = 0,
861 SVGA3D_TEX_TRANSFORM_S = (1 << 0),
862 SVGA3D_TEX_TRANSFORM_T = (1 << 1),
863 SVGA3D_TEX_TRANSFORM_R = (1 << 2),
864 SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
865 SVGA3D_TEX_PROJECTED = (1 << 15),
866} SVGA3dTexTransformFlags;
867
868typedef enum {
869 SVGA3D_TEXCOORD_GEN_OFF = 0,
870 SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
871 SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
872 SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
873 SVGA3D_TEXCOORD_GEN_SPHERE = 4,
874 SVGA3D_TEXCOORD_GEN_MAX
875} SVGA3dTextureCoordGen;
876
877/*
878 * Texture argument constants for texture combiner
879 */
880typedef enum {
881 SVGA3D_TA_INVALID = 0,
882 SVGA3D_TA_CONSTANT = 1,
883 SVGA3D_TA_PREVIOUS = 2,
884 SVGA3D_TA_DIFFUSE = 3,
885 SVGA3D_TA_TEXTURE = 4,
886 SVGA3D_TA_SPECULAR = 5,
887 SVGA3D_TA_MAX
888} SVGA3dTextureArgData;
889
890#define SVGA3D_TM_MASK_LEN 4
891
892/* Modifiers for texture argument constants defined above. */
893typedef enum {
894 SVGA3D_TM_NONE = 0,
895 SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
896 SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
897} SVGA3dTextureArgModifier;
898
899#define SVGA3D_INVALID_ID ((uint32)-1)
900#define SVGA3D_MAX_CLIP_PLANES 6
901
902/*
903 * This is the limit to the number of fixed-function texture
904 * transforms and texture coordinates we can support. It does *not*
905 * correspond to the number of texture image units (samplers) we
906 * support!
907 */
908#define SVGA3D_MAX_TEXTURE_COORDS 8
909
910/*
911 * Vertex declarations
912 *
913 * Notes:
914 *
915 * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
916 * draw with any POSITIONT vertex arrays, the programmable vertex
917 * pipeline will be implicitly disabled. Drawing will take place as if
918 * no vertex shader were bound.
919 */
920
921typedef enum {
922 SVGA3D_DECLUSAGE_POSITION = 0,
923 SVGA3D_DECLUSAGE_BLENDWEIGHT, /* 1 */
924 SVGA3D_DECLUSAGE_BLENDINDICES, /* 2 */
925 SVGA3D_DECLUSAGE_NORMAL, /* 3 */
926 SVGA3D_DECLUSAGE_PSIZE, /* 4 */
927 SVGA3D_DECLUSAGE_TEXCOORD, /* 5 */
928 SVGA3D_DECLUSAGE_TANGENT, /* 6 */
929 SVGA3D_DECLUSAGE_BINORMAL, /* 7 */
930 SVGA3D_DECLUSAGE_TESSFACTOR, /* 8 */
931 SVGA3D_DECLUSAGE_POSITIONT, /* 9 */
932 SVGA3D_DECLUSAGE_COLOR, /* 10 */
933 SVGA3D_DECLUSAGE_FOG, /* 11 */
934 SVGA3D_DECLUSAGE_DEPTH, /* 12 */
935 SVGA3D_DECLUSAGE_SAMPLE, /* 13 */
936 SVGA3D_DECLUSAGE_MAX
937} SVGA3dDeclUsage;
938
939typedef enum {
940 SVGA3D_DECLMETHOD_DEFAULT = 0,
941 SVGA3D_DECLMETHOD_PARTIALU,
942 SVGA3D_DECLMETHOD_PARTIALV,
943 SVGA3D_DECLMETHOD_CROSSUV, /* Normal */
944 SVGA3D_DECLMETHOD_UV,
945 SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */
946 SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */
947} SVGA3dDeclMethod;
948
949typedef enum {
950 SVGA3D_DECLTYPE_FLOAT1 = 0,
951 SVGA3D_DECLTYPE_FLOAT2 = 1,
952 SVGA3D_DECLTYPE_FLOAT3 = 2,
953 SVGA3D_DECLTYPE_FLOAT4 = 3,
954 SVGA3D_DECLTYPE_D3DCOLOR = 4,
955 SVGA3D_DECLTYPE_UBYTE4 = 5,
956 SVGA3D_DECLTYPE_SHORT2 = 6,
957 SVGA3D_DECLTYPE_SHORT4 = 7,
958 SVGA3D_DECLTYPE_UBYTE4N = 8,
959 SVGA3D_DECLTYPE_SHORT2N = 9,
960 SVGA3D_DECLTYPE_SHORT4N = 10,
961 SVGA3D_DECLTYPE_USHORT2N = 11,
962 SVGA3D_DECLTYPE_USHORT4N = 12,
963 SVGA3D_DECLTYPE_UDEC3 = 13,
964 SVGA3D_DECLTYPE_DEC3N = 14,
965 SVGA3D_DECLTYPE_FLOAT16_2 = 15,
966 SVGA3D_DECLTYPE_FLOAT16_4 = 16,
967 SVGA3D_DECLTYPE_MAX,
968} SVGA3dDeclType;
969
970/*
971 * This structure is used for the divisor for geometry instancing;
972 * it's a direct translation of the Direct3D equivalent.
973 */
974typedef union {
975 struct {
976 /*
977 * For index data, this number represents the number of instances to draw.
978 * For instance data, this number represents the number of
979 * instances/vertex in this stream
980 */
981 uint32 count : 30;
982
983 /*
984 * This is 1 if this is supposed to be the data that is repeated for
985 * every instance.
986 */
987 uint32 indexedData : 1;
988
989 /*
990 * This is 1 if this is supposed to be the per-instance data.
991 */
992 uint32 instanceData : 1;
993 };
994
995 uint32 value;
996} SVGA3dVertexDivisor;
997
998typedef enum {
999 SVGA3D_PRIMITIVE_INVALID = 0,
1000 SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
1001 SVGA3D_PRIMITIVE_POINTLIST = 2,
1002 SVGA3D_PRIMITIVE_LINELIST = 3,
1003 SVGA3D_PRIMITIVE_LINESTRIP = 4,
1004 SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
1005 SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
1006 SVGA3D_PRIMITIVE_MAX
1007} SVGA3dPrimitiveType;
1008
1009typedef enum {
1010 SVGA3D_COORDINATE_INVALID = 0,
1011 SVGA3D_COORDINATE_LEFTHANDED = 1,
1012 SVGA3D_COORDINATE_RIGHTHANDED = 2,
1013 SVGA3D_COORDINATE_MAX
1014} SVGA3dCoordinateType;
1015
1016typedef enum {
1017 SVGA3D_TRANSFORM_INVALID = 0,
1018 SVGA3D_TRANSFORM_WORLD = 1,
1019 SVGA3D_TRANSFORM_VIEW = 2,
1020 SVGA3D_TRANSFORM_PROJECTION = 3,
1021 SVGA3D_TRANSFORM_TEXTURE0 = 4,
1022 SVGA3D_TRANSFORM_TEXTURE1 = 5,
1023 SVGA3D_TRANSFORM_TEXTURE2 = 6,
1024 SVGA3D_TRANSFORM_TEXTURE3 = 7,
1025 SVGA3D_TRANSFORM_TEXTURE4 = 8,
1026 SVGA3D_TRANSFORM_TEXTURE5 = 9,
1027 SVGA3D_TRANSFORM_TEXTURE6 = 10,
1028 SVGA3D_TRANSFORM_TEXTURE7 = 11,
1029 SVGA3D_TRANSFORM_WORLD1 = 12,
1030 SVGA3D_TRANSFORM_WORLD2 = 13,
1031 SVGA3D_TRANSFORM_WORLD3 = 14,
1032 SVGA3D_TRANSFORM_MAX
1033} SVGA3dTransformType;
1034
1035typedef enum {
1036 SVGA3D_LIGHTTYPE_INVALID = 0,
1037 SVGA3D_LIGHTTYPE_POINT = 1,
1038 SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
1039 SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
1040 SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
1041 SVGA3D_LIGHTTYPE_MAX
1042} SVGA3dLightType;
1043
1044typedef enum {
1045 SVGA3D_CUBEFACE_POSX = 0,
1046 SVGA3D_CUBEFACE_NEGX = 1,
1047 SVGA3D_CUBEFACE_POSY = 2,
1048 SVGA3D_CUBEFACE_NEGY = 3,
1049 SVGA3D_CUBEFACE_POSZ = 4,
1050 SVGA3D_CUBEFACE_NEGZ = 5,
1051} SVGA3dCubeFace;
1052
1053typedef enum {
1054 SVGA3D_SHADERTYPE_INVALID = 0,
1055 SVGA3D_SHADERTYPE_MIN = 1,
1056 SVGA3D_SHADERTYPE_VS = 1,
1057 SVGA3D_SHADERTYPE_PS = 2,
1058 SVGA3D_SHADERTYPE_MAX = 3,
1059 SVGA3D_SHADERTYPE_GS = 3,
1060} SVGA3dShaderType;
1061
1062#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
1063
1064typedef enum {
1065 SVGA3D_CONST_TYPE_FLOAT = 0,
1066 SVGA3D_CONST_TYPE_INT = 1,
1067 SVGA3D_CONST_TYPE_BOOL = 2,
1068 SVGA3D_CONST_TYPE_MAX
1069} SVGA3dShaderConstType;
1070
1071#define SVGA3D_MAX_SURFACE_FACES 6
1072
1073typedef enum {
1074 SVGA3D_STRETCH_BLT_POINT = 0,
1075 SVGA3D_STRETCH_BLT_LINEAR = 1,
1076 SVGA3D_STRETCH_BLT_MAX
1077} SVGA3dStretchBltMode;
1078
1079typedef enum {
1080 SVGA3D_QUERYTYPE_OCCLUSION = 0,
1081 SVGA3D_QUERYTYPE_MAX
1082} SVGA3dQueryType;
1083
1084typedef enum {
1085 SVGA3D_QUERYSTATE_PENDING = 0, /* Waiting on the host (set by guest) */
1086 SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully (set by host) */
1087 SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully (set by host) */
1088 SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (For guest use only) */
1089} SVGA3dQueryState;
1090
1091typedef enum {
1092 SVGA3D_WRITE_HOST_VRAM = 1,
1093 SVGA3D_READ_HOST_VRAM = 2,
1094} SVGA3dTransferType;
1095
1096/*
1097 * The maximum number of vertex arrays we're guaranteed to support in
1098 * SVGA_3D_CMD_DRAWPRIMITIVES.
1099 */
1100#define SVGA3D_MAX_VERTEX_ARRAYS 32
1101
1102/*
1103 * The maximum number of primitive ranges we're guaranteed to support
1104 * in SVGA_3D_CMD_DRAWPRIMITIVES.
1105 */
1106#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
1107
1108/*
1109 * Identifiers for commands in the command FIFO.
1110 *
1111 * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
1112 * the SVGA3D protocol and remain reserved; they should not be used in the
1113 * future.
1114 *
1115 * IDs between 1040 and 1999 (inclusive) are available for use by the
1116 * current SVGA3D protocol.
1117 *
1118 * FIFO clients other than SVGA3D should stay below 1000, or at 2000
1119 * and up.
1120 */
1121
1122#define SVGA_3D_CMD_LEGACY_BASE 1000
1123#define SVGA_3D_CMD_BASE 1040
1124
1125#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0 /* Deprecated */
1126#define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1
1127#define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2
1128#define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3
1129#define SVGA_3D_CMD_SURFACE_DMA SVGA_3D_CMD_BASE + 4
1130#define SVGA_3D_CMD_CONTEXT_DEFINE SVGA_3D_CMD_BASE + 5
1131#define SVGA_3D_CMD_CONTEXT_DESTROY SVGA_3D_CMD_BASE + 6
1132#define SVGA_3D_CMD_SETTRANSFORM SVGA_3D_CMD_BASE + 7
1133#define SVGA_3D_CMD_SETZRANGE SVGA_3D_CMD_BASE + 8
1134#define SVGA_3D_CMD_SETRENDERSTATE SVGA_3D_CMD_BASE + 9
1135#define SVGA_3D_CMD_SETRENDERTARGET SVGA_3D_CMD_BASE + 10
1136#define SVGA_3D_CMD_SETTEXTURESTATE SVGA_3D_CMD_BASE + 11
1137#define SVGA_3D_CMD_SETMATERIAL SVGA_3D_CMD_BASE + 12
1138#define SVGA_3D_CMD_SETLIGHTDATA SVGA_3D_CMD_BASE + 13
1139#define SVGA_3D_CMD_SETLIGHTENABLED SVGA_3D_CMD_BASE + 14
1140#define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15
1141#define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16
1142#define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17
1143#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 /* Deprecated */
1144#define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19
1145#define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20
1146#define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21
1147#define SVGA_3D_CMD_SET_SHADER_CONST SVGA_3D_CMD_BASE + 22
1148#define SVGA_3D_CMD_DRAW_PRIMITIVES SVGA_3D_CMD_BASE + 23
1149#define SVGA_3D_CMD_SETSCISSORRECT SVGA_3D_CMD_BASE + 24
1150#define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25
1151#define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26
1152#define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27
1153#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 /* Deprecated */
1154#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
1155#define SVGA_3D_CMD_SURFACE_DEFINE_V2 SVGA_3D_CMD_BASE + 30
1156#define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31
1157#define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40
1158#define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41
1159#define SVGA_3D_CMD_SCREEN_DMA 1082
1160#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083
1161#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE 1084
1162
1163#define SVGA_3D_CMD_LOGICOPS_BITBLT 1085
1164#define SVGA_3D_CMD_LOGICOPS_TRANSBLT 1086
1165#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT 1087
1166#define SVGA_3D_CMD_LOGICOPS_COLORFILL 1088
1167#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND 1089
1168#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND 1090
1169
1170#define SVGA_3D_CMD_SET_OTABLE_BASE 1091
1171#define SVGA_3D_CMD_READBACK_OTABLE 1092
1172
1173#define SVGA_3D_CMD_DEFINE_GB_MOB 1093
1174#define SVGA_3D_CMD_DESTROY_GB_MOB 1094
1175#define SVGA_3D_CMD_REDEFINE_GB_MOB 1095
1176#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING 1096
1177
1178#define SVGA_3D_CMD_DEFINE_GB_SURFACE 1097
1179#define SVGA_3D_CMD_DESTROY_GB_SURFACE 1098
1180#define SVGA_3D_CMD_BIND_GB_SURFACE 1099
1181#define SVGA_3D_CMD_COND_BIND_GB_SURFACE 1100
1182#define SVGA_3D_CMD_UPDATE_GB_IMAGE 1101
1183#define SVGA_3D_CMD_UPDATE_GB_SURFACE 1102
1184#define SVGA_3D_CMD_READBACK_GB_IMAGE 1103
1185#define SVGA_3D_CMD_READBACK_GB_SURFACE 1104
1186#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1105
1187#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1106
1188
1189#define SVGA_3D_CMD_DEFINE_GB_CONTEXT 1107
1190#define SVGA_3D_CMD_DESTROY_GB_CONTEXT 1108
1191#define SVGA_3D_CMD_BIND_GB_CONTEXT 1109
1192#define SVGA_3D_CMD_READBACK_GB_CONTEXT 1110
1193#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT 1111
1194
1195#define SVGA_3D_CMD_DEFINE_GB_SHADER 1112
1196#define SVGA_3D_CMD_DESTROY_GB_SHADER 1113
1197#define SVGA_3D_CMD_BIND_GB_SHADER 1114
1198
1199#define SVGA_3D_CMD_SET_OTABLE_BASE64 1115
1200
1201#define SVGA_3D_CMD_BEGIN_GB_QUERY 1116
1202#define SVGA_3D_CMD_END_GB_QUERY 1117
1203#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY 1118
1204
1205#define SVGA_3D_CMD_NOP 1119
1206
1207#define SVGA_3D_CMD_ENABLE_GART 1120
1208#define SVGA_3D_CMD_DISABLE_GART 1121
1209#define SVGA_3D_CMD_MAP_MOB_INTO_GART 1122
1210#define SVGA_3D_CMD_UNMAP_GART_RANGE 1123
1211
1212#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET 1124
1213#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET 1125
1214#define SVGA_3D_CMD_BIND_GB_SCREENTARGET 1126
1215#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET 1127
1216
1217#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL 1128
1218#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
1219
1220#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130
1221#define SVGA_3D_CMD_GB_SCREEN_DMA 1131
1222#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132
1223#define SVGA_3D_CMD_GB_MOB_FENCE 1133
1224#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134
1225#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135
1226#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136
1227#define SVGA_3D_CMD_NOP_ERROR 1137
1228
1229#define SVGA_3D_CMD_RESERVED1 1138
1230#define SVGA_3D_CMD_RESERVED2 1139
1231#define SVGA_3D_CMD_RESERVED3 1140
1232#define SVGA_3D_CMD_RESERVED4 1141
1233#define SVGA_3D_CMD_RESERVED5 1142
1234
1235#define SVGA_3D_CMD_MAX 1142
1236#define SVGA_3D_CMD_FUTURE_MAX 3000
1237
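
Spot-checking the id arithmetic above (illustrative only): the
BASE-relative commands land in the 1040..1999 window reserved for SVGA3D,
e.g. SVGA_3D_CMD_SURFACE_DMA is 1040 + 4 == 1044. Since the macros expand
unparenthesized, they are best used as plain values rather than as
operands of further arithmetic.

    _Static_assert(SVGA_3D_CMD_SURFACE_DMA == 1044, "SVGA_3D_CMD_BASE + 4");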
1238/*
1239 * Common substructures used in multiple FIFO commands:
1240 */
1241
1242typedef struct {
1243 union {
1244 struct {
1245 uint16 function; /* SVGA3dFogFunction */
1246 uint8 type; /* SVGA3dFogType */
1247 uint8 base; /* SVGA3dFogBase */
1248 };
1249 uint32 uintValue;
1250 };
1251} SVGA3dFogMode;
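
A sketch of filling the packed fog mode (C11 designated initializers reach
through the anonymous union/struct members):

    static SVGA3dFogMode linear_vertex_fog(void)
    {
            SVGA3dFogMode mode = {
                    .function = SVGA3D_FOGFUNC_LINEAR,    /* fog equation */
                    .type     = SVGA3D_FOGTYPE_VERTEX,    /* per-vertex factors */
                    .base     = SVGA3D_FOGBASE_DEPTHBASED,
            };
            return mode;
    }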
1252
1253/*
1254 * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
1255 * is a surface ID as well as face/mipmap indices.
1256 */
1257
1258typedef
1259struct SVGA3dSurfaceImageId {
1260 uint32 sid;
1261 uint32 face;
1262 uint32 mipmap;
1263} SVGA3dSurfaceImageId;
1264
1265typedef
1266struct SVGA3dGuestImage {
1267 SVGAGuestPtr ptr;
1268
1269 /*
1270 * A note on interpretation of pitch: This value of pitch is the
1271 * number of bytes between vertically adjacent image
1272 * blocks. Normally this is the number of bytes between the first
1273 * pixel of two adjacent scanlines. With compressed textures,
1274 * however, this may represent the number of bytes between
1275 * compression blocks rather than between rows of pixels.
1276 *
1277 * XXX: Compressed textures currently must be tightly packed in guest memory.
1278 *
1279 * If the image is 1-dimensional, pitch is ignored.
1280 *
1281 * If 'pitch' is zero, the SVGA3D device calculates a pitch value
1282 * assuming each row of blocks is tightly packed.
1283 */
1284 uint32 pitch;
1285} SVGA3dGuestImage;
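
A worked example of the pitch note above for a compressed format: DXT1
stores 4x4 pixel blocks of 8 bytes each, so for a tightly packed image the
distance between block rows can be computed as below, e.g. 512 bytes for a
256-pixel-wide mip level:

    static uint32 dxt1_pitch(uint32 widthPixels)
    {
            uint32 blocksPerRow = (widthPixels + 3) / 4;  /* round up to 4x4 blocks */

            return blocksPerRow * 8;  /* 8 bytes per DXT1 block */
    }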
1286
1287
1288/*
1289 * FIFO command format definitions:
1290 */
1291
1292/*
1293 * The data size header following cmdNum for every 3d command
1294 */
1295typedef
1296struct {
1297 uint32 id;
1298 uint32 size;
1299} SVGA3dCmdHeader;
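
Every 3D command is this header followed by 'size' bytes of body. A
minimal sketch (fifo_reserve()/fifo_commit() are again hypothetical
helpers, and SVGA3dCmdDestroySurface is defined further below):

    #include <stddef.h>

    extern void *fifo_reserve(size_t bytes);  /* hypothetical FIFO helpers */
    extern void fifo_commit(size_t bytes);

    static void destroy_surface(uint32 sid)
    {
            struct {
                    SVGA3dCmdHeader header;
                    SVGA3dCmdDestroySurface body;
            } *c = fifo_reserve(sizeof(*c));

            c->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
            c->header.size = sizeof(c->body);  /* body size, header excluded */
            c->body.sid = sid;
            fifo_commit(sizeof(*c));
    }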
1300
1301/*
1302 * A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with
1303 * optional mipmaps and cube faces.
1304 */
1305
1306typedef
1307struct {
1308 uint32 width;
1309 uint32 height;
1310 uint32 depth;
1311} SVGA3dSize;
1312
1313typedef enum {
1314 SVGA3D_SURFACE_CUBEMAP = (1 << 0),
1315 SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
1316 SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
1317 SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
1318 SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
1319 SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
1320 SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
1321 SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
1322 SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
1323 SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
1324 SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
1325} SVGA3dSurfaceFlags;
1326
1327typedef
1328struct {
1329 uint32 numMipLevels;
1330} SVGA3dSurfaceFace;
1331
1332typedef
1333struct {
1334 uint32 sid;
1335 SVGA3dSurfaceFlags surfaceFlags;
1336 SVGA3dSurfaceFormat format;
1337 /*
1338 * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
1339 * structures must have the same value of numMipLevels field.
1340 * Otherwise, all SVGA3dSurfaceFace structures except the first must have
1341 * their numMipLevels set to 0.
1342 */
1343 SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
1344 /*
1345 * Followed by an SVGA3dSize structure for each mip level in each face.
1346 *
1347 * A note on surface sizes: Sizes are always specified in pixels,
1348 * even if the true surface size is not a multiple of the minimum
1349 * block size of the surface's format. For example, a 3x3x1 DXT1
1350 * compressed texture would actually be stored as a 4x4x1 image in
1351 * memory.
1352 */
1353} SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
1354
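Since the mip sizes trail the fixed body, sizing this command takes a small calculation. The helper below is a sketch under the face rules quoted in the structure comment (six identically sized faces for cubemaps, otherwise only face[0] is populated):

static uint32
svga3d_define_surface_tail_bytes(const SVGA3dCmdDefineSurface *cmd)
{
   uint32 faces = (cmd->surfaceFlags & SVGA3D_SURFACE_CUBEMAP) ?
                  SVGA3D_MAX_SURFACE_FACES : 1;

   /* One SVGA3dSize per mip level in each populated face. */
   return faces * cmd->face[0].numMipLevels * sizeof(SVGA3dSize);
}
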
1355typedef
1356struct {
1357 uint32 sid;
1358 SVGA3dSurfaceFlags surfaceFlags;
1359 SVGA3dSurfaceFormat format;
1360 /*
1361 * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
1362 * structures must have the same value of numMipLevels field.
1363 * Otherwise, all SVGA3dSurfaceFace structures except the first must have
1364 * their numMipLevels set to 0.
1365 */
1366 SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
1367 uint32 multisampleCount;
1368 SVGA3dTextureFilter autogenFilter;
1369 /*
1370 * Followed by an SVGA3dSize structure for each mip level in each face.
1371 *
1372 * A note on surface sizes: Sizes are always specified in pixels,
1373 * even if the true surface size is not a multiple of the minimum
1374 * block size of the surface's format. For example, a 3x3x1 DXT1
1375 * compressed texture would actually be stored as a 4x4x1 image in
1376 * memory.
1377 */
1378} SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
1379
1380typedef
1381struct {
1382 uint32 sid;
1383} SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
1384
1385typedef
1386struct {
1387 uint32 cid;
1388} SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
1389
1390typedef
1391struct {
1392 uint32 cid;
1393} SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
1394
1395typedef
1396struct {
1397 uint32 cid;
1398 SVGA3dClearFlag clearFlag;
1399 uint32 color;
1400 float depth;
1401 uint32 stencil;
1402 /* Followed by variable number of SVGA3dRect structures */
1403} SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
1404
1405typedef
1406struct SVGA3dCopyRect {
1407 uint32 x;
1408 uint32 y;
1409 uint32 w;
1410 uint32 h;
1411 uint32 srcx;
1412 uint32 srcy;
1413} SVGA3dCopyRect;
1414
1415typedef
1416struct SVGA3dCopyBox {
1417 uint32 x;
1418 uint32 y;
1419 uint32 z;
1420 uint32 w;
1421 uint32 h;
1422 uint32 d;
1423 uint32 srcx;
1424 uint32 srcy;
1425 uint32 srcz;
1426} SVGA3dCopyBox;
1427
1428typedef
1429struct {
1430 uint32 x;
1431 uint32 y;
1432 uint32 w;
1433 uint32 h;
1434} SVGA3dRect;
1435
1436typedef
1437struct {
1438 uint32 x;
1439 uint32 y;
1440 uint32 z;
1441 uint32 w;
1442 uint32 h;
1443 uint32 d;
1444} SVGA3dBox;
1445
1446typedef
1447struct {
1448 uint32 x;
1449 uint32 y;
1450 uint32 z;
1451} SVGA3dPoint;
1452
1453typedef
1454struct {
1455 SVGA3dLightType type;
1456 SVGA3dBool inWorldSpace;
1457 float diffuse[4];
1458 float specular[4];
1459 float ambient[4];
1460 float position[4];
1461 float direction[4];
1462 float range;
1463 float falloff;
1464 float attenuation0;
1465 float attenuation1;
1466 float attenuation2;
1467 float theta;
1468 float phi;
1469} SVGA3dLightData;
1470
1471typedef
1472struct {
1473 uint32 sid;
1474 /* Followed by variable number of SVGA3dCopyRect structures */
1475} SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
1476
1477typedef
1478struct {
1479 SVGA3dRenderStateName state;
1480 union {
1481 uint32 uintValue;
1482 float floatValue;
1483 };
1484} SVGA3dRenderState;
1485
1486typedef
1487struct {
1488 uint32 cid;
1489 /* Followed by variable number of SVGA3dRenderState structures */
1490} SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
1491
1492typedef
1493struct {
1494 uint32 cid;
1495 SVGA3dRenderTargetType type;
1496 SVGA3dSurfaceImageId target;
1497} SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
1498
1499typedef
1500struct {
1501 SVGA3dSurfaceImageId src;
1502 SVGA3dSurfaceImageId dest;
1503 /* Followed by variable number of SVGA3dCopyBox structures */
1504} SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
1505
1506typedef
1507struct {
1508 SVGA3dSurfaceImageId src;
1509 SVGA3dSurfaceImageId dest;
1510 SVGA3dBox boxSrc;
1511 SVGA3dBox boxDest;
1512 SVGA3dStretchBltMode mode;
1513} SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
1514
1515typedef
1516struct {
1517 /*
1518 * If the discard flag is present in a surface DMA operation, the host may
1519 * discard the contents of the current mipmap level and face of the target
1520 * surface before applying the surface DMA contents.
1521 */
1522 uint32 discard : 1;
1523
1524 /*
1525 * If the unsynchronized flag is present, the host may perform this upload
1526 * without syncing to pending reads on this surface.
1527 */
1528 uint32 unsynchronized : 1;
1529
1530 /*
1531 * Guests *MUST* set the reserved bits to 0 before submitting the command
1532 * suffix, as future flags may occupy these bits.
1533 */
1534 uint32 reserved : 30;
1535} SVGA3dSurfaceDMAFlags;
1536
1537typedef
1538struct {
1539 SVGA3dGuestImage guest;
1540 SVGA3dSurfaceImageId host;
1541 SVGA3dTransferType transfer;
1542 /*
1543 * Followed by variable number of SVGA3dCopyBox structures. For consistency
1544 * in all clipping logic and coordinate translation, we define the
1545 * "source" in each copyBox as the guest image and the
1546 * "destination" as the host image, regardless of transfer
1547 * direction.
1548 *
1549 * For efficiency, the SVGA3D device is free to copy more data than
1550 * specified. For example, it may round copy boxes outwards such
1551 * that they lie on particular alignment boundaries.
1552 */
1553} SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
1554
1555/*
1556 * SVGA3dCmdSurfaceDMASuffix --
1557 *
1558 * This is a command suffix that will appear after a SurfaceDMA command in
1559 * the FIFO. It contains some extra information that hosts may use to
1560 * optimize performance or protect the guest. This suffix exists to preserve
1561 * backwards compatibility while also allowing for new functionality to be
1562 * implemented.
1563 */
1564
1565typedef
1566struct {
1567 uint32 suffixSize;
1568
1569 /*
1570 * The maximum offset is used to determine the maximum offset from the
1571 * guestPtr base address that will be accessed or written to during this
1572 * surfaceDMA. If the suffix is supported, the host will respect this
1573 * boundary while performing surface DMAs.
1574 *
1575 * Defaults to MAX_UINT32
1576 */
1577 uint32 maximumOffset;
1578
1579 /*
1580 * A set of flags that describes optimizations that the host may perform
1581 * while executing this surface DMA operation. For correctness, the guest
1582 * must never rely on behaviour that differs when these flags are set.
1583 *
1584 * Defaults to 0
1585 */
1586 SVGA3dSurfaceDMAFlags flags;
1587} SVGA3dCmdSurfaceDMASuffix;
1588
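A guest that wants purely default behaviour still has to fill the suffix consistently. This sketch zeroes everything first, which also satisfies the reserved-bits rule in SVGA3dSurfaceDMAFlags, and assumes the C library memset() is available:

static void
svga3d_init_dma_suffix(SVGA3dCmdSurfaceDMASuffix *suffix)
{
   memset(suffix, 0, sizeof *suffix);   /* flags = 0, reserved bits = 0 */
   suffix->suffixSize = sizeof *suffix;
   suffix->maximumOffset = 0xffffffff;  /* i.e. MAX_UINT32: no bound */
}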
1589/*
1590 * SVGA_3D_CMD_DRAW_PRIMITIVES --
1591 *
1592 * This command is the SVGA3D device's generic drawing entry point.
1593 * It can draw multiple ranges of primitives, optionally using an
1594 * index buffer, using an arbitrary collection of vertex buffers.
1595 *
1596 * Each SVGA3dVertexDecl defines a distinct vertex array to bind
1597 * during this draw call. The declarations specify which surface
1598 * the vertex data lives in, what that vertex data is used for,
1599 * and how to interpret it.
1600 *
1601 * Each SVGA3dPrimitiveRange defines a collection of primitives
1602 * to render using the same vertex arrays. An index buffer is
1603 * optional.
1604 */
1605
1606typedef
1607struct {
1608 /*
1609 * A range hint is an optional specification for the range of indices
1610 * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
1611 * that the entire array will be used.
1612 *
1613 * These are only hints. The SVGA3D device may use them for
1614 * performance optimization if possible, but it's also allowed to
1615 * ignore these values.
1616 */
1617 uint32 first;
1618 uint32 last;
1619} SVGA3dArrayRangeHint;
1620
1621typedef
1622struct {
1623 /*
1624 * Define the origin and shape of a vertex or index array. Both
1625 * 'offset' and 'stride' are in bytes. The provided surface will be
1626 * reinterpreted as a flat array of bytes in the same format used
1627 * by surface DMA operations. To avoid unnecessary conversions, the
1628 * surface should be created with the SVGA3D_BUFFER format.
1629 *
1630 * Index 0 in the array starts 'offset' bytes into the surface.
1631 * Index 1 begins at byte 'offset + stride', etc. Array indices may
1632 * not be negative.
1633 */
1634 uint32 surfaceId;
1635 uint32 offset;
1636 uint32 stride;
1637} SVGA3dArray;
1638
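The layout rule above boils down to one line of address arithmetic; a sketch:

/* Byte offset of element 'i' within the backing SVGA3D_BUFFER surface. */
static uint32
svga3d_array_elem_offset(const SVGA3dArray *array, uint32 i)
{
   return array->offset + i * array->stride;
}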
1639typedef
1640struct {
1641 /*
1642 * Describe a vertex array's data type, and define how it is to be
1643 * used by the fixed function pipeline or the vertex shader. It
1644 * isn't useful to have two VertexDecls with the same
1645 * VertexArrayIdentity in one draw call.
1646 */
1647 SVGA3dDeclType type;
1648 SVGA3dDeclMethod method;
1649 SVGA3dDeclUsage usage;
1650 uint32 usageIndex;
1651} SVGA3dVertexArrayIdentity;
1652
1653typedef
1654struct {
1655 SVGA3dVertexArrayIdentity identity;
1656 SVGA3dArray array;
1657 SVGA3dArrayRangeHint rangeHint;
1658} SVGA3dVertexDecl;
1659
1660typedef
1661struct {
1662 /*
1663 * Define a group of primitives to render, from sequential indices.
1664 *
1665 * The values of 'primType' and 'primitiveCount' imply the
1666 * total number of vertices that will be rendered.
1667 */
1668 SVGA3dPrimitiveType primType;
1669 uint32 primitiveCount;
1670
1671 /*
1672 * Optional index buffer. If indexArray.surfaceId is
1673 * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
1674 * without an index buffer is identical to rendering with an index
1675 * buffer containing the sequence [0, 1, 2, 3, ...].
1676 *
1677 * If an index buffer is in use, indexWidth specifies the width in
1678 * bytes of each index value. It must be less than or equal to
1679 * indexArray.stride.
1680 *
1681 * (Currently, the SVGA3D device requires index buffers to be tightly
1682 * packed. In other words, indexWidth == indexArray.stride)
1683 */
1684 SVGA3dArray indexArray;
1685 uint32 indexWidth;
1686
1687 /*
1688 * Optional index bias. This number is added to all indices from
1689 * indexArray before they are used as vertex array indices. This
1690 * can be used in multiple ways:
1691 *
1692 * - When not using an indexArray, this bias can be used to
1693 * specify where in the vertex arrays to begin rendering.
1694 *
1695 * - A positive number here is equivalent to increasing the
1696 * offset in each vertex array.
1697 *
1698 * - A negative number can be used to render using a small
1699 * vertex array and an index buffer that contains large
1700 * values. This may be used by some applications that
1701 * crop a vertex buffer without modifying their index
1702 * buffer.
1703 *
1704 * Note that rendering with a negative bias value may be slower and
1705 * use more memory than rendering with a positive or zero bias.
1706 */
1707 int32 indexBias;
1708} SVGA3dPrimitiveRange;
1709
1710typedef
1711struct {
1712 uint32 cid;
1713 uint32 numVertexDecls;
1714 uint32 numRanges;
1715
1716 /*
1717 * There are two variable size arrays after the
1718 * SVGA3dCmdDrawPrimitives structure. In order,
1719 * they are:
1720 *
1721 * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
1722 * SVGA3D_MAX_VERTEX_ARRAYS;
1723 * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
1724 * SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
1725 * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
1726 * the frequency divisor for the corresponding vertex decl).
1727 */
1728} SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */
1729
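Because both arrays trail the fixed body, the FIFO size of a draw grows with the counts; a sketch, ignoring the optional SVGA3dVertexDivisor array:

static uint32
svga3d_draw_cmd_bytes(uint32 numVertexDecls, uint32 numRanges)
{
   return sizeof(SVGA3dCmdDrawPrimitives) +
          numVertexDecls * sizeof(SVGA3dVertexDecl) +
          numRanges * sizeof(SVGA3dPrimitiveRange);
}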
1730typedef
1731struct {
1732 uint32 stage;
1733 SVGA3dTextureStateName name;
1734 union {
1735 uint32 value;
1736 float floatValue;
1737 };
1738} SVGA3dTextureState;
1739
1740typedef
1741struct {
1742 uint32 cid;
1743 /* Followed by variable number of SVGA3dTextureState structures */
1744} SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
1745
1746typedef
1747struct {
1748 uint32 cid;
1749 SVGA3dTransformType type;
1750 float matrix[16];
1751} SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
1752
1753typedef
1754struct {
1755 float min;
1756 float max;
1757} SVGA3dZRange;
1758
1759typedef
1760struct {
1761 uint32 cid;
1762 SVGA3dZRange zRange;
1763} SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
1764
1765typedef
1766struct {
1767 float diffuse[4];
1768 float ambient[4];
1769 float specular[4];
1770 float emissive[4];
1771 float shininess;
1772} SVGA3dMaterial;
1773
1774typedef
1775struct {
1776 uint32 cid;
1777 SVGA3dFace face;
1778 SVGA3dMaterial material;
1779} SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
1780
1781typedef
1782struct {
1783 uint32 cid;
1784 uint32 index;
1785 SVGA3dLightData data;
1786} SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
1787
1788typedef
1789struct {
1790 uint32 cid;
1791 uint32 index;
1792 uint32 enabled;
1793} SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
1794
1795typedef
1796struct {
1797 uint32 cid;
1798 SVGA3dRect rect;
1799} SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
1800
1801typedef
1802struct {
1803 uint32 cid;
1804 SVGA3dRect rect;
1805} SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
1806
1807typedef
1808struct {
1809 uint32 cid;
1810 uint32 index;
1811 float plane[4];
1812} SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
1813
1814typedef
1815struct {
1816 uint32 cid;
1817 uint32 shid;
1818 SVGA3dShaderType type;
1819 /* Followed by variable number of DWORDs for shader bytecode */
1820} SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
1821
1822typedef
1823struct {
1824 uint32 cid;
1825 uint32 shid;
1826 SVGA3dShaderType type;
1827} SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
1828
1829typedef
1830struct {
1831 uint32 cid;
1832 uint32 reg; /* register number */
1833 SVGA3dShaderType type;
1834 SVGA3dShaderConstType ctype;
1835 uint32 values[4];
1836} SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
1837
1838typedef
1839struct {
1840 uint32 cid;
1841 SVGA3dShaderType type;
1842 uint32 shid;
1843} SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
1844
1845typedef
1846struct {
1847 uint32 cid;
1848 SVGA3dQueryType type;
1849} SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
1850
1851typedef
1852struct {
1853 uint32 cid;
1854 SVGA3dQueryType type;
1855 SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
1856} SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
1857
1858typedef
1859struct {
1860 uint32 cid; /* Same parameters passed to END_QUERY */
1861 SVGA3dQueryType type;
1862 SVGAGuestPtr guestResult;
1863} SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
1864
1865typedef
1866struct {
1867 uint32 totalSize; /* Set by guest before query is ended. */
1868 SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
1869 union { /* Set by host on exit from PENDING state */
1870 uint32 result32;
1871 };
1872} SVGA3dQueryResult;
1873
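Putting the query flow together: after END_QUERY the guest owns totalSize and the host owns state, so completion can be polled as in this sketch. SVGA3D_QUERYSTATE_PENDING is defined earlier in this header; 'volatile' stands in for whatever memory-ordering discipline the guest really uses:

static int
svga3d_query_done(const volatile SVGA3dQueryResult *result)
{
   /* The host overwrites 'state' when it leaves the PENDING state. */
   return result->state != SVGA3D_QUERYSTATE_PENDING;
}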
1874/*
1875 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
1876 *
1877 * This is a blit from an SVGA3D surface to a Screen Object. Just
1878 * like GMR-to-screen blits, this blit may be directed at a
1879 * specific screen or to the virtual coordinate space.
1880 *
1881 * The blit copies from a rectangular region of an SVGA3D surface
1882 * image to a rectangular region of a screen or screens.
1883 *
1884 * This command takes an optional variable-length list of clipping
1885 * rectangles after the body of the command. If no rectangles are
1886 * specified, there is no clipping region. The entire destRect is
1887 * drawn to. If one or more rectangles are included, they describe
1888 * a clipping region. The clip rectangle coordinates are measured
1889 * relative to the top-left corner of destRect.
1890 *
1891 * This clipping region serves multiple purposes:
1892 *
1893 * - It can be used to perform an irregularly shaped blit more
1894 * efficiently than by issuing many separate blit commands.
1895 *
1896 * - It is equivalent to allowing blits with non-integer
1897 * source coordinates. You could blit just one half-pixel
1898 * of a source, for example, by specifying a larger
1899 * destination rectangle than you need, then removing
1900 * part of it using a clip rectangle.
1901 *
1902 * Availability:
1903 * SVGA_FIFO_CAP_SCREEN_OBJECT
1904 *
1905 * Limitations:
1906 *
1907 * - Currently, no backend supports blits from a mipmap or face
1908 * other than the first one.
1909 */
1910
1911typedef
1912struct {
1913 SVGA3dSurfaceImageId srcImage;
1914 SVGASignedRect srcRect;
1915 uint32 destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
1916 SVGASignedRect destRect; /* Supports scaling if src/dest different size */
1917 /* Clipping: zero or more SVGASignedRects follow */
1918} SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
1919
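The clip list makes this command variable-length in the same way as SURFACE_COPY; a sketch of its FIFO size:

static uint32
svga3d_blit_to_screen_bytes(uint32 numClipRects)
{
   /* Zero rectangles is legal and means "no clipping region". */
   return sizeof(SVGA3dCmdBlitSurfaceToScreen) +
          numClipRects * sizeof(SVGASignedRect);
}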
1920typedef
1921struct {
1922 uint32 sid;
1923 SVGA3dTextureFilter filter;
1924} SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
1925
1926
1927/*
1928 * Guest-backed surface definitions.
1929 */
1930
1931typedef uint32 SVGAMobId;
1932
1933typedef enum SVGAMobFormat {
1934 SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
1935 SVGA3D_MOBFMT_PTDEPTH_0 = 0,
1936 SVGA3D_MOBFMT_PTDEPTH_1 = 1,
1937 SVGA3D_MOBFMT_PTDEPTH_2 = 2,
1938 SVGA3D_MOBFMT_RANGE = 3,
1939 SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
1940 SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
1941 SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
1942 SVGA3D_MOBFMT_MAX,
1943} SVGAMobFormat;
1944
1945/*
1946 * Sizes of opaque types.
1947 */
1948
1949#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16
1950#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8
1951#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64
1952#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16
1953#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64
1954#define SVGA3D_CONTEXT_DATA_SIZE 16384
1955
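These entry sizes exist so the guest can size each object table up front; for example, a sketch for the Mob table:

/* Bytes needed for an object table covering mob ids [0, numMobIds). */
static uint32
svga3d_mob_otable_bytes(uint32 numMobIds)
{
   return numMobIds * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
}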
1956/*
1957 * SVGA3dCmdSetOTableBase --
1958 *
1959 * This command allows the guest to specify the base PPN of the
1960 * specified object table.
1961 */
1962
1963typedef enum {
1964 SVGA_OTABLE_MOB = 0,
1965 SVGA_OTABLE_MIN = 0,
1966 SVGA_OTABLE_SURFACE = 1,
1967 SVGA_OTABLE_CONTEXT = 2,
1968 SVGA_OTABLE_SHADER = 3,
1969 SVGA_OTABLE_SCREEN_TARGET = 4,
1970 SVGA_OTABLE_DX9_MAX = 5,
1971 SVGA_OTABLE_MAX = 8
1972} SVGAOTableType;
1973
1974typedef
1975struct {
1976 SVGAOTableType type;
1977 PPN baseAddress;
1978 uint32 sizeInBytes;
1979 uint32 validSizeInBytes;
1980 SVGAMobFormat ptDepth;
1981} __packed
1982SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
1983
1984typedef
1985struct {
1986 SVGAOTableType type;
1987 PPN64 baseAddress;
1988 uint32 sizeInBytes;
1989 uint32 validSizeInBytes;
1990 SVGAMobFormat ptDepth;
1991} __packed
1992SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
1993
1994typedef
1995struct {
1996 SVGAOTableType type;
1997} __packed
1998SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
1999
2000/*
2001 * Define a memory object (Mob) in the OTable.
2002 */
2003
2004typedef
2005struct SVGA3dCmdDefineGBMob {
2006 SVGAMobId mobid;
2007 SVGAMobFormat ptDepth;
2008 PPN base;
2009 uint32 sizeInBytes;
2010} __packed
2011SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
2012
2013
2014/*
2015 * Destroys an object in the OTable.
2016 */
2017
2018typedef
2019struct SVGA3dCmdDestroyGBMob {
2020 SVGAMobId mobid;
2021} __packed
2022SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
2023
2024/*
2025 * Redefine an object in the OTable.
2026 */
2027
2028typedef
2029struct SVGA3dCmdRedefineGBMob {
2030 SVGAMobId mobid;
2031 SVGAMobFormat ptDepth;
2032 PPN base;
2033 uint32 sizeInBytes;
2034} __packed
2035SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */
2036
2037/*
2038 * Define a memory object (Mob) in the OTable with a PPN64 base.
2039 */
2040
2041typedef
2042struct SVGA3dCmdDefineGBMob64 {
2043 SVGAMobId mobid;
2044 SVGAMobFormat ptDepth;
2045 PPN64 base;
2046 uint32 sizeInBytes;
2047} __packed
2048SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
2049
2050/*
2051 * Redefine an object in the OTable with PPN64 base.
2052 */
2053
2054typedef
2055struct SVGA3dCmdRedefineGBMob64 {
2056 SVGAMobId mobid;
2057 SVGAMobFormat ptDepth;
2058 PPN64 base;
2059 uint32 sizeInBytes;
2060} __packed
2061SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
2062
2063/*
2064 * Notification that the page tables have been modified.
2065 */
2066
2067typedef
2068struct SVGA3dCmdUpdateGBMobMapping {
2069 SVGAMobId mobid;
2070} __packed
2071SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
2072
2073/*
2074 * Define a guest-backed surface.
2075 */
2076
2077typedef
2078struct SVGA3dCmdDefineGBSurface {
2079 uint32 sid;
2080 SVGA3dSurfaceFlags surfaceFlags;
2081 SVGA3dSurfaceFormat format;
2082 uint32 numMipLevels;
2083 uint32 multisampleCount;
2084 SVGA3dTextureFilter autogenFilter;
2085 SVGA3dSize size;
2086} __packed
2087SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
2088
2089/*
2090 * Destroy a guest-backed surface.
2091 */
2092
2093typedef
2094struct SVGA3dCmdDestroyGBSurface {
2095 uint32 sid;
2096} __packed
2097SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
2098
2099/*
2100 * Bind a guest-backed surface to an object.
2101 */
2102
2103typedef
2104struct SVGA3dCmdBindGBSurface {
2105 uint32 sid;
2106 SVGAMobId mobid;
2107} __packed
2108SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
2109
2110/*
2111 * Conditionally bind a mob to a guest-backed surface if testMobid
2112 * matches the currently bound mob. Optionally issue a readback on
2113 * the surface while it is still bound to the old mobid if the mobid
2114 * is changed by this command.
2115 */
2116
2117#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
2118
2119typedef
2120struct {
2121 uint32 sid;
2122 SVGAMobId testMobid;
2123 SVGAMobId mobid;
2124 uint32 flags;
2125} __packed
2126SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
2127
2128/*
2129 * Update an image in a guest-backed surface.
2130 * (Inform the device that the guest contents have been updated.)
2131 */
2132
2133typedef
2134struct SVGA3dCmdUpdateGBImage {
2135 SVGA3dSurfaceImageId image;
2136 SVGA3dBox box;
2137} __packed
2138SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
2139
2140/*
2141 * Update an entire guest-backed surface.
2142 * (Inform the device that the guest contents have been updated.)
2143 */
2144
2145typedef
2146struct SVGA3dCmdUpdateGBSurface {
2147 uint32 sid;
2148} __packed
2149SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
2150
2151/*
2152 * Readback an image in a guest-backed surface.
2153 * (Request the device to flush the dirty contents into the guest.)
2154 */
2155
2156typedef
2157struct SVGA3dCmdReadbackGBImage {
2158 SVGA3dSurfaceImageId image;
2159} __packed
2160SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
2161
2162/*
2163 * Readback an entire guest-backed surface.
2164 * (Request the device to flush the dirty contents into the guest.)
2165 */
2166
2167typedef
2168struct SVGA3dCmdReadbackGBSurface {
2169 uint32 sid;
2170} __packed
2171SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
2172
2173/*
2174 * Readback a sub-rect of an image in a guest-backed surface. After
2175 * issuing this command, the driver is required to issue an update call
2176 * of the same region before issuing any other commands that reference
2177 * this surface, or rendering is not guaranteed.
2178 */
2179
2180typedef
2181struct SVGA3dCmdReadbackGBImagePartial {
2182 SVGA3dSurfaceImageId image;
2183 SVGA3dBox box;
2184 uint32 invertBox;
2185} __packed
2186SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
2187
2188/*
2189 * Invalidate an image in a guest-backed surface.
2190 * (Notify the device that the contents can be lost.)
2191 */
2192
2193typedef
2194struct SVGA3dCmdInvalidateGBImage {
2195 SVGA3dSurfaceImageId image;
2196} __packed
2197SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
2198
2199/*
2200 * Invalidate an entire guest-backed surface.
2201 * (Notify the device that the contents of all images can be lost.)
2202 */
2203
2204typedef
2205struct SVGA3dCmdInvalidateGBSurface {
2206 uint32 sid;
2207} __packed
2208SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
2209
2210/*
2211 * Invalidate a sub-rect of an image in a guest-backed surface. After
2212 * issuing this command, the driver is required to issue an update call
2213 * of the same region before issuing any other commands that reference
2214 * this surface, or rendering is not guaranteed.
2215 */
2216
2217typedef
2218struct SVGA3dCmdInvalidateGBImagePartial {
2219 SVGA3dSurfaceImageId image;
2220 SVGA3dBox box;
2221 uint32 invertBox;
2222} __packed
2223SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
2224
2225/*
2226 * Define a guest-backed context.
2227 */
2228
2229typedef
2230struct SVGA3dCmdDefineGBContext {
2231 uint32 cid;
2232} __packed
2233SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
2234
2235/*
2236 * Destroy a guest-backed context.
2237 */
2238
2239typedef
2240struct SVGA3dCmdDestroyGBContext {
2241 uint32 cid;
2242} __packed
2243SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
2244
2245/*
2246 * Bind a guest-backed context.
2247 *
2248 * validContents should be set to 0 for new contexts,
2249 * and 1 if this is an old context which is getting paged
2250 * back onto the device.
2251 *
2252 * For new contexts, it is recommended that the driver
2253 * issue commands to initialize all interesting state
2254 * prior to rendering.
2255 */
2256
2257typedef
2258struct SVGA3dCmdBindGBContext {
2259 uint32 cid;
2260 SVGAMobId mobid;
2261 uint32 validContents;
2262} __packed
2263SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
2264
2265/*
2266 * Readback a guest-backed context.
2267 * (Request that the device flush the contents back into guest memory.)
2268 */
2269
2270typedef
2271struct SVGA3dCmdReadbackGBContext {
2272 uint32 cid;
2273} __packed
2274SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
2275
2276/*
2277 * Invalidate a guest-backed context.
2278 */
2279typedef
2280struct SVGA3dCmdInvalidateGBContext {
2281 uint32 cid;
2282} __packed
2283SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
2284
2285/*
2286 * Define a guest-backed shader.
2287 */
2288
2289typedef
2290struct SVGA3dCmdDefineGBShader {
2291 uint32 shid;
2292 SVGA3dShaderType type;
2293 uint32 sizeInBytes;
2294} __packed
2295SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
2296
2297/*
2298 * Bind a guest-backed shader.
2299 */
2300
2301typedef struct SVGA3dCmdBindGBShader {
2302 uint32 shid;
2303 SVGAMobId mobid;
2304 uint32 offsetInBytes;
2305} __packed
2306SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
2307
2308/*
2309 * Destroy a guest-backed shader.
2310 */
2311
2312typedef struct SVGA3dCmdDestroyGBShader {
2313 uint32 shid;
2314} __packed
2315SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
2316
2317typedef
2318struct {
2319 uint32 cid;
2320 uint32 regStart;
2321 SVGA3dShaderType shaderType;
2322 SVGA3dShaderConstType constType;
2323
2324 /*
2325 * Followed by a variable number of shader constants.
2326 *
2327 * Note that FLOAT and INT constants are 4-dwords in length, while
2328 * BOOL constants are 1-dword in length.
2329 */
2330} __packed
2331SVGA3dCmdSetGBShaderConstInline;
2332/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
2333
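The dword-count note above determines the size of the inline payload; a sketch, assuming the SVGA3D_CONST_TYPE_BOOL enumerant defined earlier in this header:

static uint32
svga3d_gb_const_payload_dwords(SVGA3dShaderConstType ctype,
                               uint32 numConsts)
{
   /* FLOAT and INT constants are 4 dwords each, BOOL constants 1. */
   return numConsts * (ctype == SVGA3D_CONST_TYPE_BOOL ? 1 : 4);
}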
2334typedef
2335struct {
2336 uint32 cid;
2337 SVGA3dQueryType type;
2338} __packed
2339SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
2340
2341typedef
2342struct {
2343 uint32 cid;
2344 SVGA3dQueryType type;
2345 SVGAMobId mobid;
2346 uint32 offset;
2347} __packed
2348SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
2349
2350
2351/*
2352 * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
2353 *
2354 * The semantics of this command are identical to the
2355 * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
2356 * to a Mob instead of a GMR.
2357 */
2358
2359typedef
2360struct {
2361 uint32 cid;
2362 SVGA3dQueryType type;
2363 SVGAMobId mobid;
2364 uint32 offset;
2365} __packed
2366SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
2367
2368typedef
2369struct {
2370 SVGAMobId mobid;
2371 uint32 fbOffset;
2372 uint32 initalized;
2373} __packed
2374SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
2375
2376typedef
2377struct {
2378 SVGAMobId mobid;
2379 uint32 gartOffset;
2380} __packed
2381SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
2382
2383
2384typedef
2385struct {
2386 uint32 gartOffset;
2387 uint32 numPages;
2388} __packed
2389SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
2390
2391
2392/*
2393 * Screen Targets
2394 */
2395#define SVGA_STFLAG_PRIMARY (1 << 0)
2396
2397typedef
2398struct {
2399 uint32 stid;
2400 uint32 width;
2401 uint32 height;
2402 int32 xRoot;
2403 int32 yRoot;
2404 uint32 flags;
2405} __packed
2406SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
2407
2408typedef
2409struct {
2410 uint32 stid;
2411} __packed
2412SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
2413
2414typedef
2415struct {
2416 uint32 stid;
2417 SVGA3dSurfaceImageId image;
2418} __packed
2419SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
2420
2421typedef
2422struct {
2423 uint32 stid;
2424 SVGA3dBox box;
2425} __packed
2426SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
2427
2428/*
2429 * Capability query index.
2430 *
2431 * Notes:
2432 *
2433 * 1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
2434 * fixed-function texture units available. Each of these units
2435 * works in both FFP and Shader modes, and supports texture
2436 * transforms and texture coordinates. The host may have additional
2437 * texture image units that are only usable with shaders.
2438 *
2439 * 2. The BUFFER_FORMAT capabilities are deprecated, and they always
2440 * return TRUE. Even on physical hardware that does not support
2441 * these formats natively, the SVGA3D device will provide an emulation
2442 * which should be invisible to the guest OS.
2443 *
2444 * In general, the SVGA3D device should support any operation on
2445 * any surface format; it just may perform some of these
2446 * operations in software depending on the capabilities of the
2447 * available physical hardware.
2448 *
2449 * XXX: In the future, we will add capabilities that describe in
2450 * detail what formats are supported in hardware for what kinds
2451 * of operations.
2452 */
2453
2454typedef enum {
2455 SVGA3D_DEVCAP_3D = 0,
2456 SVGA3D_DEVCAP_MAX_LIGHTS = 1,
2457 SVGA3D_DEVCAP_MAX_TEXTURES = 2, /* See note (1) */
2458 SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
2459 SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
2460 SVGA3D_DEVCAP_VERTEX_SHADER = 5,
2461 SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
2462 SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
2463 SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
2464 SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
2465 SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
2466 SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
2467 SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12, /* See note (2) */
2468 SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13, /* See note (2) */
2469 SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14, /* See note (2) */
2470 SVGA3D_DEVCAP_QUERY_TYPES = 15,
2471 SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
2472 SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
2473 SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
2474 SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
2475 SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
2476 SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
2477 SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
2478 SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
2479 SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
2480 SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
2481 SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
2482 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
2483 SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
2484 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
2485 SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
2486 SVGA3D_DEVCAP_TEXTURE_OPS = 31,
2487 SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
2488 SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
2489 SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
2490 SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
2491 SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
2492 SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
2493 SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
2494 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
2495 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
2496 SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
2497 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
2498 SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
2499 SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
2500 SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
2501 SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
2502 SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
2503 SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
2504 SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
2505 SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
2506 SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
2507 SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
2508 SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
2509 SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
2510 SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
2511 SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
2512 SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
2513 SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
2514 SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
2515 SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
2516 SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
2517 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
2518
2519 /*
2520 * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
2521 * render targets. This does not include the depth or stencil targets.
2522 */
2523 SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
2524
2525 SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
2526 SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
2527 SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
2528 SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
2529 SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
2530 SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
2531 SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
2532 SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
2533 SVGA3D_DEVCAP_SUPERSAMPLE = 73,
2534 SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
2535 SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
2536 SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
2537
2538 /*
2539 * This is the maximum number of SVGA context IDs that the guest
2540 * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
2541 */
2542 SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
2543
2544 /*
2545 * This is the maximum number of SVGA surface IDs that the guest
2546 * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
2547 */
2548 SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
2549
2550 SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
2551 SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
2552 SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
2553
2554 SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM = 82,
2555 SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83,
2556
2557 /*
2558 * Deprecated.
2559 */
2560 SVGA3D_DEVCAP_VGPU10 = 84,
2561
2562 /*
2563 * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
2564 * ORed together, one for every type of video decoding supported.
2565 */
2566 SVGA3D_DEVCAP_VIDEO_DECODE = 85,
2567
2568 /*
2569 * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
2570 * ORed together, one for every type of video processing supported.
2571 */
2572 SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
2573
2574 SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */
2575 SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */
2576 SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */
2577 SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */
2578
2579 SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
2580
2581 /*
2582 * Does the host support the SVGA logic ops commands?
2583 */
2584 SVGA3D_DEVCAP_LOGICOPS = 92,
2585
2586 /*
2587 * What support does the host have for screen targets?
2588 *
2589 * See the SVGA3D_SCREENTARGET_CAP bits below.
2590 */
2591 SVGA3D_DEVCAP_SCREENTARGETS = 93,
2592
2593 SVGA3D_DEVCAP_MAX /* This must be the last index. */
2594} SVGA3dDevCapIndex;
2595
2596typedef union {
2597 Bool b;
2598 uint32 u;
2599 int32 i;
2600 float f;
2601} SVGA3dDevCapResult;
2602
2603typedef enum {
2604 SVGA3DCAPS_RECORD_UNKNOWN = 0,
2605 SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
2606 SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
2607 SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
2608} SVGA3dCapsRecordType;
2609
2610typedef
2611struct SVGA3dCapsRecordHeader {
2612 uint32 length;
2613 SVGA3dCapsRecordType type;
2614}
2615SVGA3dCapsRecordHeader;
2616
2617typedef
2618struct SVGA3dCapsRecord {
2619 SVGA3dCapsRecordHeader header;
2620 uint32 data[1];
2621}
2622SVGA3dCapsRecord;
2623
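The records are designed to be walked as a self-describing list. This sketch assumes, as the device documentation describes elsewhere, that 'length' counts uint32 units including the header and that a zero-length record terminates the list:

static const SVGA3dCapsRecord *
svga3d_find_caps_record(const uint32 *caps, SVGA3dCapsRecordType type)
{
   const SVGA3dCapsRecord *record = (const SVGA3dCapsRecord *)caps;

   while (record->header.length != 0) {
      if (record->header.type == type)
         return record;
      record = (const SVGA3dCapsRecord *)
               ((const uint32 *)record + record->header.length);
   }
   return (const SVGA3dCapsRecord *)0; /* not found */
}
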
2624
2625typedef uint32 SVGA3dCapPair[2];
2626
2627#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
deleted file mode 100644
index ef3385096145..000000000000
--- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
+++ /dev/null
@@ -1,912 +0,0 @@
1/**************************************************************************
2 *
3 * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifdef __KERNEL__
29
30#include <drm/vmwgfx_drm.h>
31#define surf_size_struct struct drm_vmw_size
32
33#else /* __KERNEL__ */
34
35#ifndef ARRAY_SIZE
36#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
37#endif /* ARRAY_SIZE */
38
39#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
40#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
41#define min_t(type, x, y) ((x) < (y) ? (x) : (y))
42#define surf_size_struct SVGA3dSize
43#define u32 uint32
44#define u64 uint64_t
45#define U32_MAX ((u32)~0U)
46
47#endif /* __KERNEL__ */
48
49#include "svga3d_reg.h"
50
51/*
52 * enum svga3d_block_desc describes the active data channels in a block.
53 *
54 * There can be at most four active channels in a block:
55 * 1. Blue and bump U are stored in the first channel.
56 * 2. Green, bump V and stencil are stored in the second channel.
57 * 3. Red, bump W, luminance and depth are stored in the third channel.
58 * 4. Alpha and bump Q are stored in the fourth channel.
59 *
60 * Block channels can be used to store compressed and buffer data:
61 * 1. For compressed formats, only the data channel is used and its size
62 * is equal to that of a single block in the compression scheme.
63 * 2. For buffer formats, only the data channel is used and its size is
64 * exactly one byte in length.
65 * 3. In each case the bit depth represents the size of a single block.
66 *
67 * Note: Compressed and IEEE formats do not use the bitMask structure.
68 */
69
70enum svga3d_block_desc {
71 SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
72 SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with blue channel
73 data */
74 SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
75 data */
76 SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
77 U and V */
78 SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
79 data */
80 SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
81 data */
82 SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
83 channel */
84 SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with red channel
85 data */
86 SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
87 data */
88 SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
89 data */
90 SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
91 data */
92 SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
93 SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
94 channel */
95 SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
96 data */
97 SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
98 data */
99 SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
100 data depending on the
101 compression method used */
102 SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
103 floating point
104 representation in
105 all channels */
106 SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
107 data. */
108 SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
109 SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
110 SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
111 SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
112 SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
113 e.g., NV12. */
114 SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
115 Y, U, V, e.g., YV12. */
116
117 SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
118 SVGA3DBLOCKDESC_GREEN,
119 SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
120 SVGA3DBLOCKDESC_BLUE,
121 SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
122 SVGA3DBLOCKDESC_SRGB,
123 SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
124 SVGA3DBLOCKDESC_ALPHA,
125 SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
126 SVGA3DBLOCKDESC_SRGB,
127 SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
128 SVGA3DBLOCKDESC_V,
129 SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
130 SVGA3DBLOCKDESC_LUMINANCE,
131 SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
132 SVGA3DBLOCKDESC_W,
133 SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
134 SVGA3DBLOCKDESC_ALPHA,
135 SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
136 SVGA3DBLOCKDESC_V |
137 SVGA3DBLOCKDESC_W |
138 SVGA3DBLOCKDESC_Q,
139 SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
140 SVGA3DBLOCKDESC_ALPHA,
141 SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
142 SVGA3DBLOCKDESC_IEEE_FP,
143 SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
144 SVGA3DBLOCKDESC_GREEN,
145 SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
146 SVGA3DBLOCKDESC_BLUE,
147 SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
148 SVGA3DBLOCKDESC_ALPHA,
149 SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
150 SVGA3DBLOCKDESC_STENCIL,
151 SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
152 SVGA3DBLOCKDESC_Y,
153 SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
154 SVGA3DBLOCKDESC_Y |
155 SVGA3DBLOCKDESC_U_VIDEO |
156 SVGA3DBLOCKDESC_V_VIDEO,
157 SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
158 SVGA3DBLOCKDESC_EXP,
159 SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
160 SVGA3DBLOCKDESC_SRGB,
161 SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
162 SVGA3DBLOCKDESC_2PLANAR_YUV,
163 SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
164 SVGA3DBLOCKDESC_3PLANAR_YUV,
165};
166
167/*
168 * SVGA3dSurfaceDesc describes the actual pixel data.
169 *
170 * This structure provides the following information:
171 * 1. Block description.
172 * 2. Dimensions of a block in the surface.
173 * 3. Size of block in bytes.
174 * 4. Bit depth of the pixel data.
175 * 5. Channel bit depths and masks (if applicable).
176 */
177#define SVGA3D_CHANNEL_DEF(type) \
178 struct { \
179 union { \
180 type blue; \
181 type u; \
182 type uv_video; \
183 type u_video; \
184 }; \
185 union { \
186 type green; \
187 type v; \
188 type stencil; \
189 type v_video; \
190 }; \
191 union { \
192 type red; \
193 type w; \
194 type luminance; \
195 type y; \
196 type depth; \
197 type data; \
198 }; \
199 union { \
200 type alpha; \
201 type q; \
202 type exp; \
203 }; \
204 }
205
206struct svga3d_surface_desc {
207 enum svga3d_block_desc block_desc;
208 surf_size_struct block_size;
209 u32 bytes_per_block;
210 u32 pitch_bytes_per_block;
211
212 struct {
213 u32 total;
214 SVGA3D_CHANNEL_DEF(uint8);
215 } bit_depth;
216
217 struct {
218 SVGA3D_CHANNEL_DEF(uint8);
219 } bit_offset;
220};
221
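Before the table itself, a sketch of the computation these descriptors enable: round each dimension up to whole blocks, then multiply by the block byte size (pitch handling for multi-plane formats is deliberately omitted here):

static u32
svga3d_image_bytes(const struct svga3d_surface_desc *desc,
                   u32 width, u32 height, u32 depth)
{
   u32 bw = DIV_ROUND_UP(width, desc->block_size.width);
   u32 bh = DIV_ROUND_UP(height, desc->block_size.height);
   u32 bd = DIV_ROUND_UP(depth, desc->block_size.depth);

   return bw * bh * bd * desc->bytes_per_block;
}
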
222static const struct svga3d_surface_desc svga3d_surface_descs[] = {
223 {SVGA3DBLOCKDESC_NONE,
224 {1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } },
225 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_FORMAT_INVALID */
226
227 {SVGA3DBLOCKDESC_RGB,
228 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
229 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_X8R8G8B8 */
230
231 {SVGA3DBLOCKDESC_RGBA,
232 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
233 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_A8R8G8B8 */
234
235 {SVGA3DBLOCKDESC_RGB,
236 {1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } },
237 {{{0}, {5}, {11}, {0} } } }, /* SVGA3D_R5G6B5 */
238
239 {SVGA3DBLOCKDESC_RGB,
240 {1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } },
241 {{{0}, {5}, {10}, {0} } } }, /* SVGA3D_X1R5G5B5 */
242
243 {SVGA3DBLOCKDESC_RGBA,
244 {1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } },
245 {{{0}, {5}, {10}, {15} } } }, /* SVGA3D_A1R5G5B5 */
246
247 {SVGA3DBLOCKDESC_RGBA,
248 {1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } },
249 {{{0}, {4}, {8}, {12} } } }, /* SVGA3D_A4R4G4B4 */
250
251 {SVGA3DBLOCKDESC_DEPTH,
252 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
253 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D32 */
254
255 {SVGA3DBLOCKDESC_DEPTH,
256 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
257 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D16 */
258
259 {SVGA3DBLOCKDESC_DS,
260 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
261 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8 */
262
263 {SVGA3DBLOCKDESC_DS,
264 {1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } },
265 {{{0}, {15}, {0}, {0} } } }, /* SVGA3D_Z_D15S1 */
266
267 {SVGA3DBLOCKDESC_LUMINANCE,
268 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
269 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE8 */
270
271 {SVGA3DBLOCKDESC_LA,
272 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } },
273 {{{0}, {0}, {0}, {4} } } }, /* SVGA3D_LUMINANCE4_ALPHA4 */
274
275 {SVGA3DBLOCKDESC_LUMINANCE,
276 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
277 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE16 */
278
279 {SVGA3DBLOCKDESC_LA,
280 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
281 {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_LUMINANCE8_ALPHA8 */
282
283 {SVGA3DBLOCKDESC_COMPRESSED,
284 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
285 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT1 */
286
287 {SVGA3DBLOCKDESC_COMPRESSED,
288 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
289 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT2 */
290
291 {SVGA3DBLOCKDESC_COMPRESSED,
292 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
293 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT3 */
294
295 {SVGA3DBLOCKDESC_COMPRESSED,
296 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
297 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT4 */
298
299 {SVGA3DBLOCKDESC_COMPRESSED,
300 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
301 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT5 */
302
303 {SVGA3DBLOCKDESC_UV,
304 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
305 {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_BUMPU8V8 */
306
307 {SVGA3DBLOCKDESC_UVL,
308 {1, 1, 1}, 2, 2, {16, {{5}, {5}, {6}, {0} } },
309 {{{11}, {6}, {0}, {0} } } }, /* SVGA3D_BUMPL6V5U5 */
310
311 {SVGA3DBLOCKDESC_UVL,
312 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } },
313 {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPX8L8V8U8 */
314
315 {SVGA3DBLOCKDESC_UVL,
316 {1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } },
317 {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPL8V8U8 */
318
319 {SVGA3DBLOCKDESC_RGBA_FP,
320 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
321 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_ARGB_S10E5 */
322
323 {SVGA3DBLOCKDESC_RGBA_FP,
324 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
325 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_ARGB_S23E8 */
326
327 {SVGA3DBLOCKDESC_RGBA,
328 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
329 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2R10G10B10 */
330
331 {SVGA3DBLOCKDESC_UV,
332 {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
333 {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_V8U8 */
334
335 {SVGA3DBLOCKDESC_UVWQ,
336 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
337 {{{24}, {16}, {8}, {0} } } }, /* SVGA3D_Q8W8V8U8 */
338
339 {SVGA3DBLOCKDESC_UV,
340 {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
341 {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_CxV8U8 */
342
343 {SVGA3DBLOCKDESC_UVL,
344 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
345 {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_X8L8V8U8 */
346
347 {SVGA3DBLOCKDESC_UVWA,
348 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
349 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2W10V10U10 */
350
351 {SVGA3DBLOCKDESC_ALPHA,
352 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } },
353 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_ALPHA8 */
354
355 {SVGA3DBLOCKDESC_R_FP,
356 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
357 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S10E5 */
358
359 {SVGA3DBLOCKDESC_R_FP,
360 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
361 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S23E8 */
362
363 {SVGA3DBLOCKDESC_RG_FP,
364 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
365 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_RG_S10E5 */
366
367 {SVGA3DBLOCKDESC_RG_FP,
368 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
369 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_RG_S23E8 */
370
371 {SVGA3DBLOCKDESC_BUFFER,
372 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
373 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BUFFER */
374
375 {SVGA3DBLOCKDESC_DEPTH,
376 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
377 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24X8 */
378
379 {SVGA3DBLOCKDESC_UV,
380 {1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } },
381 {{{16}, {0}, {0}, {0} } } }, /* SVGA3D_V16U16 */
382
383 {SVGA3DBLOCKDESC_RG,
384 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
385 {{{0}, {0}, {16}, {0} } } }, /* SVGA3D_G16R16 */
386
387 {SVGA3DBLOCKDESC_RGBA,
388 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
389 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_A16B16G16R16 */
390
391 {SVGA3DBLOCKDESC_YUV,
392 {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
393 {{{0}, {0}, {8}, {0} } } }, /* SVGA3D_UYVY */
394
395 {SVGA3DBLOCKDESC_YUV,
396 {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
397 {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_YUY2 */
398
399 {SVGA3DBLOCKDESC_NV12,
400 {2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } },
401 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_NV12 */
402
403 {SVGA3DBLOCKDESC_AYUV,
404 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
405 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_AYUV */
406
407 {SVGA3DBLOCKDESC_RGBA,
408 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
409 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_TYPELESS */
410
411 {SVGA3DBLOCKDESC_RGBA,
412 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
413 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_UINT */
414
415 {SVGA3DBLOCKDESC_UVWQ,
416 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
417 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_SINT */
418
419 {SVGA3DBLOCKDESC_RGB,
420 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
421 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_TYPELESS */
422
423 {SVGA3DBLOCKDESC_RGB_FP,
424 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
425 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_FLOAT */
426
427 {SVGA3DBLOCKDESC_RGB,
428 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
429 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_UINT */
430
431 {SVGA3DBLOCKDESC_UVW,
432 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
433 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_SINT */
434
435 {SVGA3DBLOCKDESC_RGBA,
436 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
437 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_TYPELESS */
438
439 {SVGA3DBLOCKDESC_RGBA,
440 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
441 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_UINT */
442
443 {SVGA3DBLOCKDESC_UVWQ,
444 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
445 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SNORM */
446
447 {SVGA3DBLOCKDESC_UVWQ,
448 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
449 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SINT */
450
451 {SVGA3DBLOCKDESC_RG,
452 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
453 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_TYPELESS */
454
455 {SVGA3DBLOCKDESC_RG,
456 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
457 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_UINT */
458
459 {SVGA3DBLOCKDESC_UV,
460 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
461 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_SINT */
462
463 {SVGA3DBLOCKDESC_RG,
464 {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
465 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G8X24_TYPELESS */
466
467 {SVGA3DBLOCKDESC_DS,
468 {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
469 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT_S8X24_UINT */
470
471 {SVGA3DBLOCKDESC_R_FP,
472 {1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } },
473 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
474
475 {SVGA3DBLOCKDESC_GREEN,
476 {1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } },
477 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_X32_TYPELESS_G8X24_UINT */
478
479 {SVGA3DBLOCKDESC_RGBA,
480 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
481 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_TYPELESS */
482
483 {SVGA3DBLOCKDESC_RGBA,
484 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
485 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_UINT */
486
487 {SVGA3DBLOCKDESC_RGB_FP,
488 {1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } },
489 {{{0}, {10}, {21}, {0} } } }, /* SVGA3D_R11G11B10_FLOAT */
490
491 {SVGA3DBLOCKDESC_RGBA,
492 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
493 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_TYPELESS */
494
495 {SVGA3DBLOCKDESC_RGBA,
496 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
497 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM */
498
499 {SVGA3DBLOCKDESC_RGBA_SRGB,
500 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
501 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM_SRGB */
502
503 {SVGA3DBLOCKDESC_RGBA,
504 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
505 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UINT */
506
507 {SVGA3DBLOCKDESC_RGBA,
508 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
509 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_SINT */
510
511 {SVGA3DBLOCKDESC_RG,
512 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
513 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_TYPELESS */
514
515 {SVGA3DBLOCKDESC_RG_FP,
516 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
517 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_UINT */
518
519 {SVGA3DBLOCKDESC_UV,
520 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
521 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_SINT */
522
523 {SVGA3DBLOCKDESC_RED,
524 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
525 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_TYPELESS */
526
527 {SVGA3DBLOCKDESC_DEPTH,
528 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
529 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */
530
531 {SVGA3DBLOCKDESC_RED,
532 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
533 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */
534
535 {SVGA3DBLOCKDESC_RED,
536 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
537 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */
538
539 {SVGA3DBLOCKDESC_RG,
540 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
541 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */
542
543 {SVGA3DBLOCKDESC_DS,
544 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
545 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */
546
547 {SVGA3DBLOCKDESC_RED,
548 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
549 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */
550
551 {SVGA3DBLOCKDESC_GREEN,
552 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } },
553 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */
554
555 {SVGA3DBLOCKDESC_RG,
556 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
557 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */
558
559 {SVGA3DBLOCKDESC_RG,
560 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
561 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */
562
563 {SVGA3DBLOCKDESC_RG,
564 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
565 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */
566
567 {SVGA3DBLOCKDESC_UV,
568 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
569 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */
570
571 {SVGA3DBLOCKDESC_RED,
572 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
573 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */
574
575 {SVGA3DBLOCKDESC_RED,
576 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
577 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */
578
579 {SVGA3DBLOCKDESC_RED,
580 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
581 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */
582
583 {SVGA3DBLOCKDESC_U,
584 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
585 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */
586
587 {SVGA3DBLOCKDESC_U,
588 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
589 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */
590
591 {SVGA3DBLOCKDESC_RED,
592 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
593 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */
594
595 {SVGA3DBLOCKDESC_RED,
596 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
597 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */
598
599 {SVGA3DBLOCKDESC_RED,
600 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
601 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */
602
603 {SVGA3DBLOCKDESC_U,
604 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
605 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */
606
607 {SVGA3DBLOCKDESC_U,
608 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
609 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */
610
611 {SVGA3DBLOCKDESC_RED,
612 {8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
613 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */
614
615 {SVGA3DBLOCKDESC_RGBE,
616 {1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } },
617 {{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */
618
619 {SVGA3DBLOCKDESC_RG,
620 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
621 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */
622
623 {SVGA3DBLOCKDESC_RG,
624 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
625 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */
626
627 {SVGA3DBLOCKDESC_COMPRESSED,
628 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
629 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */
630
631 {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
632 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
633 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */
634
635 {SVGA3DBLOCKDESC_COMPRESSED,
636 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
637 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */
638
639 {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
640 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
641 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */
642
643 {SVGA3DBLOCKDESC_COMPRESSED,
644 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
645 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */
646
647 {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
648 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
649 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */
650
651 {SVGA3DBLOCKDESC_COMPRESSED,
652 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
653 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */
654
655 {SVGA3DBLOCKDESC_COMPRESSED,
656 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
657 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */
658
659 {SVGA3DBLOCKDESC_COMPRESSED,
660 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
661 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */
662
663 {SVGA3DBLOCKDESC_COMPRESSED,
664 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
665 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */
666
667 {SVGA3DBLOCKDESC_COMPRESSED,
668 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
669 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */
670
671 {SVGA3DBLOCKDESC_COMPRESSED,
672 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
673 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */
674
675 {SVGA3DBLOCKDESC_RGBA,
676 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
677 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
678
679 {SVGA3DBLOCKDESC_RGBA,
680 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
681 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */
682
683 {SVGA3DBLOCKDESC_RGBA_SRGB,
684 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
685 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */
686
687 {SVGA3DBLOCKDESC_RGB,
688 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
689 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */
690
691 {SVGA3DBLOCKDESC_RGB_SRGB,
692 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
693 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */
694
695 {SVGA3DBLOCKDESC_DEPTH,
696 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
697 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */
698
699 {SVGA3DBLOCKDESC_DS,
700 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
701 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */
702
703 {SVGA3DBLOCKDESC_DS,
704 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
705 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */
706};
707
708static inline u32 clamped_umul32(u32 a, u32 b)
709{
710 u64 tmp = (u64) a*b;
711 return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
712}
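/*
 * Minimal usage sketch (illustrative only, not part of the device
 * header): clamped_umul32() saturates instead of wrapping, so any
 * product that no longer fits in 32 bits collapses to U32_MAX.
 */
static inline void clamped_umul32_example(void)
{
	/* 0x10000 * 0x10000 == 2^32, one past the largest u32. */
	BUG_ON(clamped_umul32(0x10000, 0x10000) != U32_MAX);
	/* In-range products are returned unchanged. */
	BUG_ON(clamped_umul32(1000, 1000) != 1000000);
}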
713
714static inline const struct svga3d_surface_desc *
715svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
716{
717 if (format < ARRAY_SIZE(svga3d_surface_descs))
718 return &svga3d_surface_descs[format];
719
720 return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
721}
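/*
 * Usage sketch (illustrative only): the lookup never reads past the
 * descriptor table; out-of-range formats resolve to the
 * SVGA3D_FORMAT_INVALID entry instead.
 */
static inline u32
svga3dsurface_bytes_per_block_example(SVGA3dSurfaceFormat format)
{
	return svga3dsurface_get_desc(format)->bytes_per_block;
}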
722
723/*
724 *----------------------------------------------------------------------
725 *
726 * svga3dsurface_get_mip_size --
727 *
728 * Given a base level size and the mip level, compute the size of
729 * the mip level.
730 *
731 * Results:
732 * See above.
733 *
734 * Side effects:
735 * None.
736 *
737 *----------------------------------------------------------------------
738 */
739
740static inline surf_size_struct
741svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
742{
743 surf_size_struct size;
744
745 size.width = max_t(u32, base_level.width >> mip_level, 1);
746 size.height = max_t(u32, base_level.height >> mip_level, 1);
747 size.depth = max_t(u32, base_level.depth >> mip_level, 1);
748 return size;
749}
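/*
 * Worked example (illustrative sketch only, assuming surf_size_struct
 * carries the width/height/depth fields used above): each mip level
 * shifts every dimension right by the level and clamps to 1, so a
 * 256x128x1 base level yields 64x32x1 at mip level 2.
 */
static inline void svga3dsurface_get_mip_size_example(void)
{
	surf_size_struct base = { .width = 256, .height = 128, .depth = 1 };
	surf_size_struct mip2 = svga3dsurface_get_mip_size(base, 2);

	BUG_ON(mip2.width != 64 || mip2.height != 32 || mip2.depth != 1);
}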
750
751static inline void
752svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
753 const surf_size_struct *pixel_size,
754 surf_size_struct *block_size)
755{
756 block_size->width = DIV_ROUND_UP(pixel_size->width,
757 desc->block_size.width);
758 block_size->height = DIV_ROUND_UP(pixel_size->height,
759 desc->block_size.height);
760 block_size->depth = DIV_ROUND_UP(pixel_size->depth,
761 desc->block_size.depth);
762}
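/*
 * Worked example (illustrative only): pixel extents are rounded up to
 * whole blocks, so a 70x70 image of a 4x4 block-compressed format such
 * as SVGA3D_BC1_TYPELESS occupies DIV_ROUND_UP(70, 4) == 18 blocks in
 * each dimension.
 */
static inline void svga3dsurface_get_size_in_blocks_example(void)
{
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(SVGA3D_BC1_TYPELESS);
	surf_size_struct pixels = { .width = 70, .height = 70, .depth = 1 };
	surf_size_struct blocks;

	svga3dsurface_get_size_in_blocks(desc, &pixels, &blocks);
	BUG_ON(blocks.width != 18 || blocks.height != 18 || blocks.depth != 1);
}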
763
764static inline bool
765svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
766{
767 return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
768}
769
770static inline u32
771svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
772 const surf_size_struct *size)
773{
774 u32 pitch;
775 surf_size_struct blocks;
776
777 svga3dsurface_get_size_in_blocks(desc, size, &blocks);
778
779 pitch = blocks.width * desc->pitch_bytes_per_block;
780
781 return pitch;
782}
783
784/*
785 *-----------------------------------------------------------------------------
786 *
787 * svga3dsurface_get_image_buffer_size --
788 *
789 * Return the number of bytes of buffer space required to store
790 * one image of a surface, optionally using the specified pitch.
791 *
792 * If pitch is zero, it is assumed that rows are tightly packed.
793 *
794 * This function is overflow-safe. If the result would have
795 * overflowed, U32_MAX is returned instead.
796 *
797 * Results:
798 * Byte count.
799 *
800 * Side effects:
801 * None.
802 *
803 *-----------------------------------------------------------------------------
804 */
805
806static inline u32
807svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
808 const surf_size_struct *size,
809 u32 pitch)
810{
811 surf_size_struct image_blocks;
812 u32 slice_size, total_size;
813
814 svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
815
816 if (svga3dsurface_is_planar_surface(desc)) {
817 total_size = clamped_umul32(image_blocks.width,
818 image_blocks.height);
819 total_size = clamped_umul32(total_size, image_blocks.depth);
820 total_size = clamped_umul32(total_size, desc->bytes_per_block);
821 return total_size;
822 }
823
824 if (pitch == 0)
825 pitch = svga3dsurface_calculate_pitch(desc, size);
826
827 slice_size = clamped_umul32(image_blocks.height, pitch);
828 total_size = clamped_umul32(slice_size, image_blocks.depth);
829
830 return total_size;
831}
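/*
 * Worked example (illustrative only): with pitch == 0 the rows are
 * tightly packed, so a 64x64 SVGA3D_R8G8B8A8_UNORM image (1x1 blocks,
 * 4 bytes each) needs 64 * (64 * 4) == 16384 bytes. Every intermediate
 * multiplication saturates at U32_MAX via clamped_umul32().
 */
static inline void svga3dsurface_get_image_buffer_size_example(void)
{
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(SVGA3D_R8G8B8A8_UNORM);
	surf_size_struct size = { .width = 64, .height = 64, .depth = 1 };

	BUG_ON(svga3dsurface_get_image_buffer_size(desc, &size, 0) != 16384);
}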
832
833static inline u32
834svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
835 surf_size_struct base_level_size,
836 u32 num_mip_levels,
837 bool cubemap)
838{
839 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
840 u64 total_size = 0;
841 u32 mip;
842
843 for (mip = 0; mip < num_mip_levels; mip++) {
844 surf_size_struct size =
845 svga3dsurface_get_mip_size(base_level_size, mip);
846 total_size += svga3dsurface_get_image_buffer_size(desc,
847 &size, 0);
848 }
849
850 if (cubemap)
851 total_size *= SVGA3D_MAX_SURFACE_FACES;
852
853 return (u32) min_t(u64, total_size, (u64) U32_MAX);
854}
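/*
 * Worked example (illustrative only): the serialized size is the sum of
 * all mip image sizes, multiplied by the face count for cubemaps. A
 * two-level 4x4 SVGA3D_R8G8B8A8_UNORM chain is 4*4*4 + 2*2*4 == 80
 * bytes.
 */
static inline void svga3dsurface_get_serialized_size_example(void)
{
	surf_size_struct base = { .width = 4, .height = 4, .depth = 1 };

	BUG_ON(svga3dsurface_get_serialized_size(SVGA3D_R8G8B8A8_UNORM,
						 base, 2, false) != 80);
}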
855
856
857/**
858 * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
859 * in an image (or volume).
860 * @format: The surface format.
861 * @width: The image width in pixels.
862 * @height: The image height in pixels.
863 */
864static inline u32
865svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
866 u32 width, u32 height,
867 u32 x, u32 y, u32 z)
868{
869 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
870 const u32 bw = desc->block_size.width, bh = desc->block_size.height;
871 const u32 bd = desc->block_size.depth;
872 const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
873 const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
874 const u32 offset = (z / bd * imgstride +
875 y / bh * rowstride +
876 x / bw * desc->bytes_per_block);
877 return offset;
878}
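/*
 * Worked example (illustrative only): for SVGA3D_R8G8B8A8_UNORM in a
 * 64x64 image, pixel (x=3, y=2, z=0) sits at 2 * (64 * 4) + 3 * 4 ==
 * 524 bytes. For block-compressed formats the same arithmetic yields
 * the offset of the containing block rather than of the pixel itself.
 */
static inline void svga3dsurface_get_pixel_offset_example(void)
{
	BUG_ON(svga3dsurface_get_pixel_offset(SVGA3D_R8G8B8A8_UNORM,
					      64, 64, 3, 2, 0) != 524);
}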
879
880
881static inline u32
882svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
883 surf_size_struct baseLevelSize,
884 u32 numMipLevels,
885 u32 face,
886 u32 mip)
887
888{
889 u32 offset;
890 u32 mipChainBytes;
891 u32 mipChainBytesToLevel;
892 u32 i;
893 const struct svga3d_surface_desc *desc;
894 surf_size_struct mipSize;
895 u32 bytes;
896
897 desc = svga3dsurface_get_desc(format);
898
899 mipChainBytes = 0;
900 mipChainBytesToLevel = 0;
901 for (i = 0; i < numMipLevels; i++) {
902 mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
903 bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
904 mipChainBytes += bytes;
905 if (i < mip)
906 mipChainBytesToLevel += bytes;
907 }
908
909 offset = mipChainBytes * face + mipChainBytesToLevel;
910
911 return offset;
912}
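/*
 * Worked example (illustrative only): each face stores a complete mip
 * chain, so with the 80-byte two-level 4x4 R8G8B8A8 chain above,
 * face 1 / mip 1 starts at 1 * 80 + 64 == 144 bytes into the surface.
 */
static inline void svga3dsurface_get_image_offset_example(void)
{
	surf_size_struct base = { .width = 4, .height = 4, .depth = 1 };

	BUG_ON(svga3dsurface_get_image_offset(SVGA3D_R8G8B8A8_UNORM,
					      base, 2, 1, 1) != 144);
}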
diff --git a/drivers/gpu/drm/vmwgfx/svga_types.h b/drivers/gpu/drm/vmwgfx/svga_types.h
deleted file mode 100644
index 55836dedcfc2..000000000000
--- a/drivers/gpu/drm/vmwgfx/svga_types.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28/**
29 * Silly typedefs for the svga headers. Currently the headers are shared
30 * between all components that talk to svga. And as such the headers are
31 * in a completely different style and use weird defines.
32 *
33 * This file lets all the ugly be prefixed with svga*.
34 */
35
36#ifndef _SVGA_TYPES_H_
37#define _SVGA_TYPES_H_
38
39typedef uint16_t uint16;
40typedef uint32_t uint32;
41typedef uint8_t uint8;
42typedef int32_t int32;
43typedef bool Bool;
44
45#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
new file mode 100644
index 000000000000..9c42e96da510
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -0,0 +1,1294 @@
1/**************************************************************************
2 *
3 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * This file implements the vmwgfx context binding manager.
29 * The sole reason for having to use this code is that vmware guest
30 * backed contexts can be swapped out to their backing mobs by the device
31 * at any time and swapped back in at any time. At swapin time, the device
32 * validates the context bindings to make sure they point to valid resources.
33 * It's this outside-of-drawcall validation (which can happen at any time)
34 * that makes this code necessary.
35 *
36 * We therefore need to kill any context bindings pointing to a resource
37 * when the resource is swapped out. Furthermore, if the vmwgfx driver has
38 * swapped out the context we can't swap it in again to kill bindings because
39 * of backing mob reservation lockdep violations. So, as part of
40 * context swapout, we also kill all bindings of the context, so that
41 * they are already killed if a resource to which a binding points
42 * needs to be swapped out.
43 *
44 * Note that a resource can be pointed to by bindings from multiple contexts.
45 * Therefore we can't easily protect this data with a per-context mutex
46 * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
47 * to protect all binding manager data.
48 *
49 * Finally, any association between a context and a global resource
50 * (surface, shader or even DX query) is conceptually a context binding that
51 * needs to be tracked by this code.
52 */
53
54#include "vmwgfx_drv.h"
55#include "vmwgfx_binding.h"
56#include "device_include/svga3d_reg.h"
57
58#define VMW_BINDING_RT_BIT 0
59#define VMW_BINDING_PS_BIT 1
60#define VMW_BINDING_SO_BIT 2
61#define VMW_BINDING_VB_BIT 3
62#define VMW_BINDING_NUM_BITS 4
63
64#define VMW_BINDING_PS_SR_BIT 0
65
66/**
67 * struct vmw_ctx_binding_state - per context binding state
68 *
69 * @dev_priv: Pointer to device private structure.
70 * @list: linked list of individual active bindings.
71 * @render_targets: Render target bindings.
72 * @texture_units: Texture units bindings.
73 * @ds_view: Depth-stencil view binding.
74 * @so_targets: StreamOutput target bindings.
75 * @vertex_buffers: Vertex buffer bindings.
76 * @index_buffer: Index buffer binding.
77 * @per_shader: Per shader-type bindings.
78 * @dirty: Bitmap tracking per binding-type changes that have not yet
79 * been emitted to the device.
80 * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
81 * have not yet been emitted to the device.
82 * @bind_cmd_buffer: Scratch space used to construct binding commands.
83 * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer.
84 * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
85 * device binding slot of the first command data entry in @bind_cmd_buffer.
86 *
87 * Note that this structure also provides storage space for the individual
88 * struct vmw_ctx_bindinfo objects, so that no dynamic allocation is needed
89 * for individual bindings.
90 *
91 */
92struct vmw_ctx_binding_state {
93 struct vmw_private *dev_priv;
94 struct list_head list;
95 struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
96 struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
97 struct vmw_ctx_bindinfo_view ds_view;
98 struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
99 struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
100 struct vmw_ctx_bindinfo_ib index_buffer;
101 struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];
102
103 unsigned long dirty;
104 DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
105
106 u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
107 u32 bind_cmd_count;
108 u32 bind_first_slot;
109};
110
111static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
112static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
113 bool rebind);
114static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
115static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
116static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
117static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
118static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
119static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
120static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
121 bool rebind);
122static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
123static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
124static void vmw_binding_build_asserts(void) __attribute__ ((unused));
125
126typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
127
128/**
129 * struct vmw_binding_info - Per binding type information for the binding
130 * manager
131 *
132 * @size: The size of the binding struct derived from struct vmw_ctx_bindinfo.
133 * @offsets: array[shader_slot] of offsets to the array[slot]
134 * of struct bindings for the binding type.
135 * @scrub_func: Pointer to the scrub function for this binding type.
136 *
137 * Holds static information to help optimize the binding manager and avoid
138 * an excessive amount of switch statements.
139 */
140struct vmw_binding_info {
141 size_t size;
142 const size_t *offsets;
143 vmw_scrub_func scrub_func;
144};
145
146/*
147 * A number of static variables that help determine the scrub func and the
148 * location of the struct vmw_ctx_bindinfo slots for each binding type.
149 */
150static const size_t vmw_binding_shader_offsets[] = {
151 offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
152 offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
153 offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
154};
155static const size_t vmw_binding_rt_offsets[] = {
156 offsetof(struct vmw_ctx_binding_state, render_targets),
157};
158static const size_t vmw_binding_tex_offsets[] = {
159 offsetof(struct vmw_ctx_binding_state, texture_units),
160};
161static const size_t vmw_binding_cb_offsets[] = {
162 offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
163 offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
164 offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
165};
166static const size_t vmw_binding_dx_ds_offsets[] = {
167 offsetof(struct vmw_ctx_binding_state, ds_view),
168};
169static const size_t vmw_binding_sr_offsets[] = {
170 offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
171 offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
172 offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
173};
174static const size_t vmw_binding_so_offsets[] = {
175 offsetof(struct vmw_ctx_binding_state, so_targets),
176};
177static const size_t vmw_binding_vb_offsets[] = {
178 offsetof(struct vmw_ctx_binding_state, vertex_buffers),
179};
180static const size_t vmw_binding_ib_offsets[] = {
181 offsetof(struct vmw_ctx_binding_state, index_buffer),
182};
183
184static const struct vmw_binding_info vmw_binding_infos[] = {
185 [vmw_ctx_binding_shader] = {
186 .size = sizeof(struct vmw_ctx_bindinfo_shader),
187 .offsets = vmw_binding_shader_offsets,
188 .scrub_func = vmw_binding_scrub_shader},
189 [vmw_ctx_binding_rt] = {
190 .size = sizeof(struct vmw_ctx_bindinfo_view),
191 .offsets = vmw_binding_rt_offsets,
192 .scrub_func = vmw_binding_scrub_render_target},
193 [vmw_ctx_binding_tex] = {
194 .size = sizeof(struct vmw_ctx_bindinfo_tex),
195 .offsets = vmw_binding_tex_offsets,
196 .scrub_func = vmw_binding_scrub_texture},
197 [vmw_ctx_binding_cb] = {
198 .size = sizeof(struct vmw_ctx_bindinfo_cb),
199 .offsets = vmw_binding_cb_offsets,
200 .scrub_func = vmw_binding_scrub_cb},
201 [vmw_ctx_binding_dx_shader] = {
202 .size = sizeof(struct vmw_ctx_bindinfo_shader),
203 .offsets = vmw_binding_shader_offsets,
204 .scrub_func = vmw_binding_scrub_dx_shader},
205 [vmw_ctx_binding_dx_rt] = {
206 .size = sizeof(struct vmw_ctx_bindinfo_view),
207 .offsets = vmw_binding_rt_offsets,
208 .scrub_func = vmw_binding_scrub_dx_rt},
209 [vmw_ctx_binding_sr] = {
210 .size = sizeof(struct vmw_ctx_bindinfo_view),
211 .offsets = vmw_binding_sr_offsets,
212 .scrub_func = vmw_binding_scrub_sr},
213 [vmw_ctx_binding_ds] = {
214 .size = sizeof(struct vmw_ctx_bindinfo_view),
215 .offsets = vmw_binding_dx_ds_offsets,
216 .scrub_func = vmw_binding_scrub_dx_rt},
217 [vmw_ctx_binding_so] = {
218 .size = sizeof(struct vmw_ctx_bindinfo_so),
219 .offsets = vmw_binding_so_offsets,
220 .scrub_func = vmw_binding_scrub_so},
221 [vmw_ctx_binding_vb] = {
222 .size = sizeof(struct vmw_ctx_bindinfo_vb),
223 .offsets = vmw_binding_vb_offsets,
224 .scrub_func = vmw_binding_scrub_vb},
225 [vmw_ctx_binding_ib] = {
226 .size = sizeof(struct vmw_ctx_bindinfo_ib),
227 .offsets = vmw_binding_ib_offsets,
228 .scrub_func = vmw_binding_scrub_ib},
229};
230
231/**
232 * vmw_cbs_context - Return a pointer to the context resource of a
233 * context binding state tracker.
234 *
235 * @cbs: The context binding state tracker.
236 *
237 * Provided there are any active bindings, this function will return an
238 * unreferenced pointer to the context resource that owns the context
239 * binding state tracker. If there are no active bindings, this function
240 * will return NULL. Note that the caller must somehow ensure that a reference
241 * is held on the context resource prior to calling this function.
242 */
243static const struct vmw_resource *
244vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
245{
246 if (list_empty(&cbs->list))
247 return NULL;
248
249 return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
250 ctx_list)->ctx;
251}
252
253/**
254 * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
255 *
256 * @cbs: Pointer to the struct vmw_ctx_binding_state which holds the slot.
257 * @bt: The binding type.
258 * @shader_slot: The shader slot of the binding. If none, then set to 0.
259 * @slot: The slot of the binding.
260 */
261static struct vmw_ctx_bindinfo *
262vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
263 enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
264{
265 const struct vmw_binding_info *b = &vmw_binding_infos[bt];
266 size_t offset = b->offsets[shader_slot] + b->size*slot;
267
268 return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
269}
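/*
 * Layout sketch (illustrative only): a constant-buffer binding for
 * shader slot 1, slot 3 resolves to
 * offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers)
 * plus 3 * sizeof(struct vmw_ctx_bindinfo_cb) from the start of @cbs,
 * i.e. &cbs->per_shader[1].const_buffers[3].bi.
 */
static inline struct vmw_ctx_bindinfo *
vmw_binding_loc_cb_example(struct vmw_ctx_binding_state *cbs)
{
	return vmw_binding_loc(cbs, vmw_ctx_binding_cb, 1, 3);
}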
270
271/**
272 * vmw_binding_drop: Stop tracking a context binding
273 *
274 * @bi: Pointer to binding tracker storage.
275 *
276 * Stops tracking a context binding, and re-initializes its storage.
277 * Typically used when the context binding is replaced with a binding to
278 * another (or the same, for that matter) resource.
279 */
280static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
281{
282 list_del(&bi->ctx_list);
283 if (!list_empty(&bi->res_list))
284 list_del(&bi->res_list);
285 bi->ctx = NULL;
286}
287
288/**
289 * vmw_binding_add: Start tracking a context binding
290 *
291 * @cbs: Pointer to the context binding state tracker.
292 * @bi: Information about the binding to track.
293 * @shader_slot: The shader slot of the binding. If none, then set to 0.
294 * @slot: The slot of the binding.
295 *
296 */
297void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
298 const struct vmw_ctx_bindinfo *bi,
299 u32 shader_slot, u32 slot)
300{
301 struct vmw_ctx_bindinfo *loc =
302 vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
303 const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
304
305 if (loc->ctx != NULL)
306 vmw_binding_drop(loc);
307
308 memcpy(loc, bi, b->size);
309 loc->scrubbed = false;
310 list_add(&loc->ctx_list, &cbs->list);
311 INIT_LIST_HEAD(&loc->res_list);
312}
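/*
 * Usage sketch (illustrative only; the bindinfo field names follow the
 * definitions in vmwgfx_binding.h): callers typically fill in a
 * stack-local bindinfo and let vmw_binding_add() copy it into the
 * tracker-owned slot, so the stack copy may go away afterwards.
 */
static inline void
vmw_binding_add_dx_rt_example(struct vmw_ctx_binding_state *cbs,
			      struct vmw_resource *ctx,
			      struct vmw_resource *view_res)
{
	struct vmw_ctx_bindinfo_view binding = {
		.bi = {
			.ctx = ctx,
			.res = view_res,
			.bt = vmw_ctx_binding_dx_rt,
		},
		.slot = 0,
	};

	vmw_binding_add(cbs, &binding.bi, 0, binding.slot);
}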
313
314/**
315 * vmw_binding_transfer: Transfer a context binding tracking entry.
316 *
317 * @cbs: Pointer to the persistent context binding state tracker.
318 * @from: The staged binding state that @bi belongs to.
319 * @bi: Information about the binding to transfer.
320 */
321static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
322 const struct vmw_ctx_binding_state *from,
323 const struct vmw_ctx_bindinfo *bi)
324{
325 size_t offset = (unsigned long)bi - (unsigned long)from;
326 struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
327 ((unsigned long) cbs + offset);
328
329 if (loc->ctx != NULL) {
330 WARN_ON(bi->scrubbed);
331
332 vmw_binding_drop(loc);
333 }
334
335 if (bi->res != NULL) {
336 memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
337 list_add_tail(&loc->ctx_list, &cbs->list);
338 list_add_tail(&loc->res_list, &loc->res->binding_head);
339 }
340}
341
342/**
343 * vmw_binding_state_kill - Kill all bindings associated with a
344 * struct vmw_ctx_binding_state structure, and re-initialize the structure.
345 *
346 * @cbs: Pointer to the context binding state tracker.
347 *
348 * Emits commands to scrub all bindings associated with the
349 * context binding state tracker. Then re-initializes the whole structure.
350 */
351void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
352{
353 struct vmw_ctx_bindinfo *entry, *next;
354
355 vmw_binding_state_scrub(cbs);
356 list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
357 vmw_binding_drop(entry);
358}
359
360/**
361 * vmw_binding_state_scrub - Scrub all bindings associated with a
362 * struct vmw_ctx_binding_state structure.
363 *
364 * @cbs: Pointer to the context binding state tracker.
365 *
366 * Emits commands to scrub all bindings associated with the
367 * context binding state tracker.
368 */
369void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
370{
371 struct vmw_ctx_bindinfo *entry;
372
373 list_for_each_entry(entry, &cbs->list, ctx_list) {
374 if (!entry->scrubbed) {
375 (void) vmw_binding_infos[entry->bt].scrub_func
376 (entry, false);
377 entry->scrubbed = true;
378 }
379 }
380
381 (void) vmw_binding_emit_dirty(cbs);
382}
383
384/**
385 * vmw_binding_res_list_kill - Kill all bindings on a
386 * resource binding list
387 *
388 * @head: list head of resource binding list
389 *
390 * Kills all bindings associated with a specific resource. Typically
391 * called before the resource is destroyed.
392 */
393void vmw_binding_res_list_kill(struct list_head *head)
394{
395 struct vmw_ctx_bindinfo *entry, *next;
396
397 vmw_binding_res_list_scrub(head);
398 list_for_each_entry_safe(entry, next, head, res_list)
399 vmw_binding_drop(entry);
400}
401
402/**
403 * vmw_binding_res_list_scrub - Scrub all bindings on a
404 * resource binding list
405 *
406 * @head: list head of resource binding list
407 *
408 * Scrub all bindings associated with a specific resource. Typically
409 * called before the resource is evicted.
410 */
411void vmw_binding_res_list_scrub(struct list_head *head)
412{
413 struct vmw_ctx_bindinfo *entry;
414
415 list_for_each_entry(entry, head, res_list) {
416 if (!entry->scrubbed) {
417 (void) vmw_binding_infos[entry->bt].scrub_func
418 (entry, false);
419 entry->scrubbed = true;
420 }
421 }
422
423 list_for_each_entry(entry, head, res_list) {
424 struct vmw_ctx_binding_state *cbs =
425 vmw_context_binding_state(entry->ctx);
426
427 (void) vmw_binding_emit_dirty(cbs);
428 }
429}
430
431
432/**
433 * vmw_binding_state_commit - Commit staged binding info
434 *
435 * @to: Pointer to the persistent binding state to commit the staged
436 * binding info to.
437 * @from: Staged binding info built during execbuf.
438 *
439 * Transfers binding info from a temporary structure
440 * (typically used by execbuf) to the persistent
441 * structure in the context. This can be done once commands have been
442 * submitted to hardware.
443 */
444void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
445 struct vmw_ctx_binding_state *from)
446{
447 struct vmw_ctx_bindinfo *entry, *next;
448
449 list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
450 vmw_binding_transfer(to, from, entry);
451 vmw_binding_drop(entry);
452 }
453}
454
455/**
456 * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
457 *
458 * @cbs: Pointer to the context binding state tracker.
459 *
460 * Walks through the context binding list and rebinds all scrubbed
461 * resources.
462 */
463int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
464{
465 struct vmw_ctx_bindinfo *entry;
466 int ret;
467
468 list_for_each_entry(entry, &cbs->list, ctx_list) {
469 if (likely(!entry->scrubbed))
470 continue;
471
472 if ((entry->res == NULL || entry->res->id ==
473 SVGA3D_INVALID_ID))
474 continue;
475
476 ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
477 if (unlikely(ret != 0))
478 return ret;
479
480 entry->scrubbed = false;
481 }
482
483 return vmw_binding_emit_dirty(cbs);
484}
485
486/**
487 * vmw_binding_scrub_shader - scrub a shader binding from a context.
488 *
489 * @bi: single binding information.
490 * @rebind: Whether to issue a bind instead of scrub command.
491 */
492static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
493{
494 struct vmw_ctx_bindinfo_shader *binding =
495 container_of(bi, typeof(*binding), bi);
496 struct vmw_private *dev_priv = bi->ctx->dev_priv;
497 struct {
498 SVGA3dCmdHeader header;
499 SVGA3dCmdSetShader body;
500 } *cmd;
501
502 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
503 if (unlikely(cmd == NULL)) {
504 DRM_ERROR("Failed reserving FIFO space for shader "
505 "unbinding.\n");
506 return -ENOMEM;
507 }
508
509 cmd->header.id = SVGA_3D_CMD_SET_SHADER;
510 cmd->header.size = sizeof(cmd->body);
511 cmd->body.cid = bi->ctx->id;
512 cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
513 cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
514 vmw_fifo_commit(dev_priv, sizeof(*cmd));
515
516 return 0;
517}
518
519/**
520 * vmw_binding_scrub_render_target - scrub a render target binding
521 * from a context.
522 *
523 * @bi: single binding information.
524 * @rebind: Whether to issue a bind instead of scrub command.
525 */
526static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
527 bool rebind)
528{
529 struct vmw_ctx_bindinfo_view *binding =
530 container_of(bi, typeof(*binding), bi);
531 struct vmw_private *dev_priv = bi->ctx->dev_priv;
532 struct {
533 SVGA3dCmdHeader header;
534 SVGA3dCmdSetRenderTarget body;
535 } *cmd;
536
537 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
538 if (unlikely(cmd == NULL)) {
539 DRM_ERROR("Failed reserving FIFO space for render target "
540 "unbinding.\n");
541 return -ENOMEM;
542 }
543
544 cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
545 cmd->header.size = sizeof(cmd->body);
546 cmd->body.cid = bi->ctx->id;
547 cmd->body.type = binding->slot;
548 cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
549 cmd->body.target.face = 0;
550 cmd->body.target.mipmap = 0;
551 vmw_fifo_commit(dev_priv, sizeof(*cmd));
552
553 return 0;
554}
555
556/**
557 * vmw_binding_scrub_texture - scrub a texture binding from a context.
558 *
559 * @bi: single binding information.
560 * @rebind: Whether to issue a bind instead of scrub command.
561 *
562 * TODO: Possibly complement this function with a function that takes
563 * a list of texture bindings and combines them into a single command.
564 */
565static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
566 bool rebind)
567{
568 struct vmw_ctx_bindinfo_tex *binding =
569 container_of(bi, typeof(*binding), bi);
570 struct vmw_private *dev_priv = bi->ctx->dev_priv;
571 struct {
572 SVGA3dCmdHeader header;
573 struct {
574 SVGA3dCmdSetTextureState c;
575 SVGA3dTextureState s1;
576 } body;
577 } *cmd;
578
579 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
580 if (unlikely(cmd == NULL)) {
581 DRM_ERROR("Failed reserving FIFO space for texture "
582 "unbinding.\n");
583 return -ENOMEM;
584 }
585
586 cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
587 cmd->header.size = sizeof(cmd->body);
588 cmd->body.c.cid = bi->ctx->id;
589 cmd->body.s1.stage = binding->texture_stage;
590 cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
591 cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
592 vmw_fifo_commit(dev_priv, sizeof(*cmd));
593
594 return 0;
595}
596
597/**
598 * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
599 *
600 * @bi: single binding information.
601 * @rebind: Whether to issue a bind instead of scrub command.
602 */
603static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
604{
605 struct vmw_ctx_bindinfo_shader *binding =
606 container_of(bi, typeof(*binding), bi);
607 struct vmw_private *dev_priv = bi->ctx->dev_priv;
608 struct {
609 SVGA3dCmdHeader header;
610 SVGA3dCmdDXSetShader body;
611 } *cmd;
612
613 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
614 if (unlikely(cmd == NULL)) {
615 DRM_ERROR("Failed reserving FIFO space for DX shader "
616 "unbinding.\n");
617 return -ENOMEM;
618 }
619 cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
620 cmd->header.size = sizeof(cmd->body);
621 cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
622 cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
623 vmw_fifo_commit(dev_priv, sizeof(*cmd));
624
625 return 0;
626}
627
628/**
629 * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
630 *
631 * @bi: single binding information.
632 * @rebind: Whether to issue a bind instead of scrub command.
633 */
634static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
635{
636 struct vmw_ctx_bindinfo_cb *binding =
637 container_of(bi, typeof(*binding), bi);
638 struct vmw_private *dev_priv = bi->ctx->dev_priv;
639 struct {
640 SVGA3dCmdHeader header;
641 SVGA3dCmdDXSetSingleConstantBuffer body;
642 } *cmd;
643
644 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
645 if (unlikely(cmd == NULL)) {
646 DRM_ERROR("Failed reserving FIFO space for DX constant "
647 "buffer unbinding.\n");
648 return -ENOMEM;
649 }
650
651 cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
652 cmd->header.size = sizeof(cmd->body);
653 cmd->body.slot = binding->slot;
654 cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
655 if (rebind) {
656 cmd->body.offsetInBytes = binding->offset;
657 cmd->body.sizeInBytes = binding->size;
658 cmd->body.sid = bi->res->id;
659 } else {
660 cmd->body.offsetInBytes = 0;
661 cmd->body.sizeInBytes = 0;
662 cmd->body.sid = SVGA3D_INVALID_ID;
663 }
664 vmw_fifo_commit(dev_priv, sizeof(*cmd));
665
666 return 0;
667}
668
669/**
670 * vmw_collect_view_ids - Build view id data for a view binding command
671 * without checking which bindings actually need to be emitted
672 *
673 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
674 * @bi: Pointer to where the binding info array is stored in @cbs
675 * @max_num: Maximum number of entries in the @bi array.
676 *
677 * Scans the @bi array for bindings and builds a buffer of view id data.
678 * Stops at the first non-existing binding in the @bi array.
679 * On output, @cbs->bind_cmd_count contains the number of bindings to be
680 * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
681 * contains the command data.
682 */
683static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
684 const struct vmw_ctx_bindinfo *bi,
685 u32 max_num)
686{
687 const struct vmw_ctx_bindinfo_view *biv =
688 container_of(bi, struct vmw_ctx_bindinfo_view, bi);
689 unsigned long i;
690
691 cbs->bind_cmd_count = 0;
692 cbs->bind_first_slot = 0;
693
694 for (i = 0; i < max_num; ++i, ++biv) {
695 if (!biv->bi.ctx)
696 break;
697
698 cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
699 ((biv->bi.scrubbed) ?
700 SVGA3D_INVALID_ID : biv->bi.res->id);
701 }
702}
703
704/**
705 * vmw_collect_dirty_view_ids - Build view id data for a view binding command
706 *
707 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
708 * @bi: Pointer to where the binding info array is stored in @cbs
709 * @dirty: Bitmap indicating which bindings need to be emitted.
710 * @max_num: Maximum number of entries in the @bi array.
711 *
712 * Scans the @bi array for bindings that need to be emitted and
713 * builds a buffer of view id data.
714 * On output, @cbs->bind_cmd_count contains the number of bindings to be
715 * emitted, @cbs->bind_first_slot indicates the index of the first emitted
716 * binding, and @cbs->bind_cmd_buffer contains the command data.
717 */
718static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
719 const struct vmw_ctx_bindinfo *bi,
720 unsigned long *dirty,
721 u32 max_num)
722{
723 const struct vmw_ctx_bindinfo_view *biv =
724 container_of(bi, struct vmw_ctx_bindinfo_view, bi);
725 unsigned long i, next_bit;
726
727 cbs->bind_cmd_count = 0;
728 i = find_first_bit(dirty, max_num);
729 next_bit = i;
730 cbs->bind_first_slot = i;
731
732 biv += i;
733 for (; i < max_num; ++i, ++biv) {
734 cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
735 ((!biv->bi.ctx || biv->bi.scrubbed) ?
736 SVGA3D_INVALID_ID : biv->bi.res->id);
737
738 if (next_bit == i) {
739 next_bit = find_next_bit(dirty, max_num, i + 1);
740 if (next_bit >= max_num)
741 break;
742 }
743 }
744}
745
746/**
747 * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
748 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
749 * @shader_slot: The shader slot whose resource bindings are to be emitted.
750 */
751static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
752 int shader_slot)
753{
754 const struct vmw_ctx_bindinfo *loc =
755 &cbs->per_shader[shader_slot].shader_res[0].bi;
756 struct {
757 SVGA3dCmdHeader header;
758 SVGA3dCmdDXSetShaderResources body;
759 } *cmd;
760 size_t cmd_size, view_id_size;
761 const struct vmw_resource *ctx = vmw_cbs_context(cbs);
762
763 vmw_collect_dirty_view_ids(cbs, loc,
764 cbs->per_shader[shader_slot].dirty_sr,
765 SVGA3D_DX_MAX_SRVIEWS);
766 if (cbs->bind_cmd_count == 0)
767 return 0;
768
769 view_id_size = cbs->bind_cmd_count*sizeof(uint32);
770 cmd_size = sizeof(*cmd) + view_id_size;
771 cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
772 if (unlikely(cmd == NULL)) {
773 DRM_ERROR("Failed reserving FIFO space for DX shader"
774 " resource binding.\n");
775 return -ENOMEM;
776 }
777
778 cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
779 cmd->header.size = sizeof(cmd->body) + view_id_size;
780 cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
781 cmd->body.startView = cbs->bind_first_slot;
782
783 memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
784
785 vmw_fifo_commit(ctx->dev_priv, cmd_size);
786 bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
787 cbs->bind_first_slot, cbs->bind_cmd_count);
788
789 return 0;
790}
791
792/**
793 * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
794 *
795 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
796 */
797static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
798{
799 const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
800 struct {
801 SVGA3dCmdHeader header;
802 SVGA3dCmdDXSetRenderTargets body;
803 } *cmd;
804 size_t cmd_size, view_id_size;
805 const struct vmw_resource *ctx = vmw_cbs_context(cbs);
806
807 vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
808 view_id_size = cbs->bind_cmd_count*sizeof(uint32);
809 cmd_size = sizeof(*cmd) + view_id_size;
810 cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
811 if (unlikely(cmd == NULL)) {
812 DRM_ERROR("Failed reserving FIFO space for DX render-target"
813 " binding.\n");
814 return -ENOMEM;
815 }
816
817 cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
818 cmd->header.size = sizeof(cmd->body) + view_id_size;
819
820 if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
821 cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
822 else
823 cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;
824
825 memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
826
827 vmw_fifo_commit(ctx->dev_priv, cmd_size);
828
829 return 0;
830
831}
832
833/**
834 * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
835 * without checking which bindings actually need to be emitted
836 *
837 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
838 * @bi: Pointer to where the binding info array is stored in @cbs
839 * @max_num: Maximum number of entries in the @bi array.
840 *
841 * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
842 * Stops at the first non-existing binding in the @bi array.
843 * On output, @cbs->bind_cmd_count contains the number of bindings to be
844 * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
845 * contains the command data.
846 */
847static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
848 const struct vmw_ctx_bindinfo *bi,
849 u32 max_num)
850{
851 const struct vmw_ctx_bindinfo_so *biso =
852 container_of(bi, struct vmw_ctx_bindinfo_so, bi);
853 unsigned long i;
854 SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
855
856 cbs->bind_cmd_count = 0;
857 cbs->bind_first_slot = 0;
858
859 for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
860 ++cbs->bind_cmd_count) {
861 if (!biso->bi.ctx)
862 break;
863
864 if (!biso->bi.scrubbed) {
865 so_buffer->sid = biso->bi.res->id;
866 so_buffer->offset = biso->offset;
867 so_buffer->sizeInBytes = biso->size;
868 } else {
869 so_buffer->sid = SVGA3D_INVALID_ID;
870 so_buffer->offset = 0;
871 so_buffer->sizeInBytes = 0;
872 }
873 }
874}
875
876/**
877 * vmw_emit_set_so - Issue delayed streamout binding commands
878 *
879 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
880 */
881static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
882{
883 const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
884 struct {
885 SVGA3dCmdHeader header;
886 SVGA3dCmdDXSetSOTargets body;
887 } *cmd;
888 size_t cmd_size, so_target_size;
889 const struct vmw_resource *ctx = vmw_cbs_context(cbs);
890
891 vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
892 if (cbs->bind_cmd_count == 0)
893 return 0;
894
895 so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
896 cmd_size = sizeof(*cmd) + so_target_size;
897 cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
898 if (unlikely(cmd == NULL)) {
899 DRM_ERROR("Failed reserving FIFO space for DX SO target"
900 " binding.\n");
901 return -ENOMEM;
902 }
903
904 cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
905 cmd->header.size = sizeof(cmd->body) + so_target_size;
906 memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
907
908 vmw_fifo_commit(ctx->dev_priv, cmd_size);
909
910 return 0;
911
912}
913
914/**
915 * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
916 *
917 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
918 *
919 */
920static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
921{
922 struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
923 u32 i;
924 int ret = 0;
925
926 for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
927 if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
928 continue;
929
930 ret = vmw_emit_set_sr(cbs, i);
931 if (ret)
932 break;
933
934 __clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
935 }
936
937 return ret;
938}
939
940/**
941 * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
942 * SVGA3dCmdDXSetVertexBuffers command
943 *
944 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
945 * @bi: Pointer to where the binding info array is stored in @cbs
946 * @dirty: Bitmap indicating which bindings need to be emitted.
947 * @max_num: Maximum number of entries in the @bi array.
948 *
949 * Scans the @bi array for bindings that need to be emitted and
950 * builds a buffer of SVGA3dVertexBuffer data.
951 * On output, @cbs->bind_cmd_count contains the number of bindings to be
952 * emitted, @cbs->bind_first_slot indicates the index of the first emitted
953 * binding, and @cbs->bind_cmd_buffer contains the command data.
954 */
955static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
956 const struct vmw_ctx_bindinfo *bi,
957 unsigned long *dirty,
958 u32 max_num)
959{
960 const struct vmw_ctx_bindinfo_vb *biv =
961 container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
962 unsigned long i, next_bit;
963 SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;
964
965 cbs->bind_cmd_count = 0;
966 i = find_first_bit(dirty, max_num);
967 next_bit = i;
968 cbs->bind_first_slot = i;
969
970 biv += i;
971 for (; i < max_num; ++i, ++biv, ++vbs) {
972 if (!biv->bi.ctx || biv->bi.scrubbed) {
973 vbs->sid = SVGA3D_INVALID_ID;
974 vbs->stride = 0;
975 vbs->offset = 0;
976 } else {
977 vbs->sid = biv->bi.res->id;
978 vbs->stride = biv->stride;
979 vbs->offset = biv->offset;
980 }
981 cbs->bind_cmd_count++;
982 if (next_bit == i) {
983 next_bit = find_next_bit(dirty, max_num, i + 1);
984 if (next_bit >= max_num)
985 break;
986 }
987 }
988}
989
990/**
991 * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
992 *
993 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
994 *
995 */
996static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
997{
998 const struct vmw_ctx_bindinfo *loc =
999 &cbs->vertex_buffers[0].bi;
1000 struct {
1001 SVGA3dCmdHeader header;
1002 SVGA3dCmdDXSetVertexBuffers body;
1003 } *cmd;
1004 size_t cmd_size, set_vb_size;
1005 const struct vmw_resource *ctx = vmw_cbs_context(cbs);
1006
1007 vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
1008 SVGA3D_DX_MAX_VERTEXBUFFERS);
1009 if (cbs->bind_cmd_count == 0)
1010 return 0;
1011
1012 set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
1013 cmd_size = sizeof(*cmd) + set_vb_size;
1014 cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
1015 if (unlikely(cmd == NULL)) {
1016 DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
1017 " binding.\n");
1018 return -ENOMEM;
1019 }
1020
1021 cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
1022 cmd->header.size = sizeof(cmd->body) + set_vb_size;
1023 cmd->body.startBuffer = cbs->bind_first_slot;
1024
1025 memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
1026
1027 vmw_fifo_commit(ctx->dev_priv, cmd_size);
1028 bitmap_clear(cbs->dirty_vb,
1029 cbs->bind_first_slot, cbs->bind_cmd_count);
1030
1031 return 0;
1032}
1033
1034/**
1035 * vmw_binding_emit_dirty - Issue delayed binding commands
1036 *
1037 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
1038 *
1039 * This function issues the delayed binding commands that arise from
1040 * previous scrub / unscrub calls. These binding commands are typically
1041 * commands that batch a number of bindings and therefore it makes sense
1042 * to delay them.
1043 */
1044static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
1045{
1046 int ret = 0;
1047 unsigned long hit = 0;
1048
1049 while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
1050 < VMW_BINDING_NUM_BITS) {
1051
1052 switch (hit) {
1053 case VMW_BINDING_RT_BIT:
1054 ret = vmw_emit_set_rt(cbs);
1055 break;
1056 case VMW_BINDING_PS_BIT:
1057 ret = vmw_binding_emit_dirty_ps(cbs);
1058 break;
1059 case VMW_BINDING_SO_BIT:
1060 ret = vmw_emit_set_so(cbs);
1061 break;
1062 case VMW_BINDING_VB_BIT:
1063 ret = vmw_emit_set_vb(cbs);
1064 break;
1065 default:
1066 BUG();
1067 }
1068 if (ret)
1069 return ret;
1070
1071 __clear_bit(hit, &cbs->dirty);
1072 hit++;
1073 }
1074
1075 return 0;
1076}
1077
1078/**
1079 * vmw_binding_scrub_sr - Schedule a dx shader resource binding
1080 * scrub from a context
1081 *
1082 * @bi: single binding information.
1083 * @rebind: Whether to issue a bind instead of scrub command.
1084 */
1085static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
1086{
1087 struct vmw_ctx_bindinfo_view *biv =
1088 container_of(bi, struct vmw_ctx_bindinfo_view, bi);
1089 struct vmw_ctx_binding_state *cbs =
1090 vmw_context_binding_state(bi->ctx);
1091
1092 __set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
1093 __set_bit(VMW_BINDING_PS_SR_BIT,
1094 &cbs->per_shader[biv->shader_slot].dirty);
1095 __set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);
1096
1097 return 0;
1098}
1099
1100/**
1101 * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
1102 * scrub from a context
1103 *
1104 * @bi: single binding information.
1105 * @rebind: Whether to issue a bind instead of scrub command.
1106 */
1107static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
1108{
1109 struct vmw_ctx_binding_state *cbs =
1110 vmw_context_binding_state(bi->ctx);
1111
1112 __set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);
1113
1114 return 0;
1115}
1116
1117/**
1118 * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding
1119 * scrub from a context
1120 *
1121 * @bi: single binding information.
1122 * @rebind: Whether to issue a bind instead of scrub command.
1123 */
1124static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
1125{
1126 struct vmw_ctx_binding_state *cbs =
1127 vmw_context_binding_state(bi->ctx);
1128
1129 __set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);
1130
1131 return 0;
1132}
1133
1134/**
1135 * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
1136 * scrub from a context
1137 *
1138 * @bi: single binding information.
1139 * @rebind: Whether to issue a bind instead of scrub command.
1140 */
1141static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
1142{
1143 struct vmw_ctx_bindinfo_vb *bivb =
1144 container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
1145 struct vmw_ctx_binding_state *cbs =
1146 vmw_context_binding_state(bi->ctx);
1147
1148 __set_bit(bivb->slot, cbs->dirty_vb);
1149 __set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
1150
1151 return 0;
1152}
1153
1154/**
1155 * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
1156 *
1157 * @bi: single binding information.
1158 * @rebind: Whether to issue a bind instead of scrub command.
1159 */
1160static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
1161{
1162 struct vmw_ctx_bindinfo_ib *binding =
1163 container_of(bi, typeof(*binding), bi);
1164 struct vmw_private *dev_priv = bi->ctx->dev_priv;
1165 struct {
1166 SVGA3dCmdHeader header;
1167 SVGA3dCmdDXSetIndexBuffer body;
1168 } *cmd;
1169
1170 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
1171 if (unlikely(cmd == NULL)) {
1172 DRM_ERROR("Failed reserving FIFO space for DX index buffer "
1173 "binding.\n");
1174 return -ENOMEM;
1175 }
1176 cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
1177 cmd->header.size = sizeof(cmd->body);
1178 if (rebind) {
1179 cmd->body.sid = bi->res->id;
1180 cmd->body.format = binding->format;
1181 cmd->body.offset = binding->offset;
1182 } else {
1183 cmd->body.sid = SVGA3D_INVALID_ID;
1184 cmd->body.format = 0;
1185 cmd->body.offset = 0;
1186 }
1187
1188 vmw_fifo_commit(dev_priv, sizeof(*cmd));
1189
1190 return 0;
1191}
1192
1193/**
1194 * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
1195 * memory accounting.
1196 *
1197 * @dev_priv: Pointer to a device private structure.
1198 *
1199 * Returns a pointer to a newly allocated struct or an error pointer on error.
1200 */
1201struct vmw_ctx_binding_state *
1202vmw_binding_state_alloc(struct vmw_private *dev_priv)
1203{
1204 struct vmw_ctx_binding_state *cbs;
1205 int ret;
1206
1207 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
1208 false, false);
1209 if (ret)
1210 return ERR_PTR(ret);
1211
1212 cbs = vzalloc(sizeof(*cbs));
1213 if (!cbs) {
1214 ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
1215 return ERR_PTR(-ENOMEM);
1216 }
1217
1218 cbs->dev_priv = dev_priv;
1219 INIT_LIST_HEAD(&cbs->list);
1220
1221 return cbs;
1222}
1223
1224/**
1225 * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
1226 * memory accounting info.
1227 *
1228 * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
1229 */
1230void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
1231{
1232 struct vmw_private *dev_priv = cbs->dev_priv;
1233
1234 vfree(cbs);
1235 ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
1236}
1237
1238/**
1239 * vmw_binding_state_list - Get the binding list of a
1240 * struct vmw_ctx_binding_state
1241 *
1242 * @cbs: Pointer to the struct vmw_ctx_binding_state
1243 *
1244 * Returns the binding list which can be used to traverse through the bindings
1245 * and access the resource information of all bindings.
1246 */
1247struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
1248{
1249 return &cbs->list;
1250}
1251
1252/**
1253 * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
1254 *
1255 * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
1256 *
1257 * Drops all bindings registered in @cbs. No device binding actions are
1258 * performed.
1259 */
1260void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
1261{
1262 struct vmw_ctx_bindinfo *entry, *next;
1263
1264 list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
1265 vmw_binding_drop(entry);
1266}
1267
1268/*
1269 * This function is unused at run-time, and only used to hold various build
1270 * asserts important for code optimization assumptions.
1271 */
1272static void vmw_binding_build_asserts(void)
1273{
1274 BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
1275 BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX);
1276 BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));
1277
1278 /*
1279 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
1280 * view id arrays.
1281 */
1282 BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
1283 BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
1284 BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);
1285
1286 /*
1287 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
1288 * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
1289 */
1290 BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
1291 VMW_MAX_VIEW_BINDINGS*sizeof(u32));
1292 BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
1293 VMW_MAX_VIEW_BINDINGS*sizeof(u32));
1294}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
new file mode 100644
index 000000000000..bf2e77ad5a20
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -0,0 +1,209 @@
1/**************************************************************************
2 *
3 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27#ifndef _VMWGFX_BINDING_H_
28#define _VMWGFX_BINDING_H_
29
30#include "device_include/svga3d_reg.h"
31#include <linux/list.h>
32
33#define VMW_MAX_VIEW_BINDINGS 128
34
35struct vmw_private;
36struct vmw_ctx_binding_state;
37
38/*
39 * enum vmw_ctx_binding_type - abstract resource to context binding types
40 */
41enum vmw_ctx_binding_type {
42 vmw_ctx_binding_shader,
43 vmw_ctx_binding_rt,
44 vmw_ctx_binding_tex,
45 vmw_ctx_binding_cb,
46 vmw_ctx_binding_dx_shader,
47 vmw_ctx_binding_dx_rt,
48 vmw_ctx_binding_sr,
49 vmw_ctx_binding_ds,
50 vmw_ctx_binding_so,
51 vmw_ctx_binding_vb,
52 vmw_ctx_binding_ib,
53 vmw_ctx_binding_max
54};
55
56/**
57 * struct vmw_ctx_bindinfo - single binding metadata
58 *
59 * @ctx_list: List head for the context's list of bindings.
60 * @res_list: List head for a resource's list of bindings.
61 * @ctx: Non-refcounted pointer to the context that owns the binding. NULL
62 * indicates no binding present.
63 * @res: Non-refcounted pointer to the resource the binding points to. This
64 * is typically a surface or a view.
65 * @bt: Binding type.
66 * @scrubbed: Whether the binding has been scrubbed from the context.
67 */
68struct vmw_ctx_bindinfo {
69 struct list_head ctx_list;
70 struct list_head res_list;
71 struct vmw_resource *ctx;
72 struct vmw_resource *res;
73 enum vmw_ctx_binding_type bt;
74 bool scrubbed;
75};
76
77/**
78 * struct vmw_ctx_bindinfo_tex - texture stage binding metadata
79 *
80 * @bi: struct vmw_ctx_bindinfo we derive from.
81 * @texture_stage: Device data used to reconstruct binding command.
82 */
83struct vmw_ctx_bindinfo_tex {
84 struct vmw_ctx_bindinfo bi;
85 uint32 texture_stage;
86};
87
88/**
89 * struct vmw_ctx_bindinfo_shader - Shader binding metadata
90 *
91 * @bi: struct vmw_ctx_bindinfo we derive from.
92 * @shader_slot: Device data used to reconstruct binding command.
93 */
94struct vmw_ctx_bindinfo_shader {
95 struct vmw_ctx_bindinfo bi;
96 SVGA3dShaderType shader_slot;
97};
98
99/**
100 * struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata
101 *
102 * @bi: struct vmw_ctx_bindinfo we derive from.
103 * @shader_slot: Device data used to reconstruct binding command.
104 * @offset: Device data used to reconstruct binding command.
105 * @size: Device data used to reconstruct binding command.
106 * @slot: Device data used to reconstruct binding command.
107 */
108struct vmw_ctx_bindinfo_cb {
109 struct vmw_ctx_bindinfo bi;
110 SVGA3dShaderType shader_slot;
111 uint32 offset;
112 uint32 size;
113 uint32 slot;
114};
115
116/**
117 * struct vmw_ctx_bindinfo_view - View binding metadata
118 *
119 * @bi: struct vmw_ctx_bindinfo we derive from.
120 * @shader_slot: Device data used to reconstruct binding command.
121 * @slot: Device data used to reconstruct binding command.
122 */
123struct vmw_ctx_bindinfo_view {
124 struct vmw_ctx_bindinfo bi;
125 SVGA3dShaderType shader_slot;
126 uint32 slot;
127};
128
129/**
130 * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
131 *
132 * @bi: struct vmw_ctx_bindinfo we derive from.
133 * @offset: Device data used to reconstruct binding command.
134 * @size: Device data used to reconstruct binding command.
135 * @slot: Device data used to reconstruct binding command.
136 */
137struct vmw_ctx_bindinfo_so {
138 struct vmw_ctx_bindinfo bi;
139 uint32 offset;
140 uint32 size;
141 uint32 slot;
142};
143
144/**
145 * struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata
146 *
147 * @bi: struct vmw_ctx_bindinfo we derive from.
148 * @offset: Device data used to reconstruct binding command.
149 * @stride: Device data used to reconstruct binding command.
150 * @slot: Device data used to reconstruct binding command.
151 */
152struct vmw_ctx_bindinfo_vb {
153 struct vmw_ctx_bindinfo bi;
154 uint32 offset;
155 uint32 stride;
156 uint32 slot;
157};
158
159/**
160 * struct vmw_ctx_bindinfo_ib - Index buffer binding metadata
161 *
162 * @bi: struct vmw_ctx_bindinfo we derive from.
163 * @offset: Device data used to reconstruct binding command.
164 * @format: Device data used to reconstruct binding command.
165 */
166struct vmw_ctx_bindinfo_ib {
167 struct vmw_ctx_bindinfo bi;
168 uint32 offset;
169 uint32 format;
170};
171
172/**
173 * struct vmw_dx_shader_bindings - per shader type context binding state
174 *
175 * @shader: The shader binding for this shader type
176 * @const_buffers: Const buffer bindings for this shader type.
177 * @shader_res: Shader resource view bindings for this shader type.
178 * @dirty_sr: Bitmap tracking individual shader resource bindings changes
179 * that have not yet been emitted to the device.
180 * @dirty: Bitmap tracking per-binding type binding changes that have not
181 * yet been emitted to the device.
182 */
183struct vmw_dx_shader_bindings {
184 struct vmw_ctx_bindinfo_shader shader;
185 struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS];
186 struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS];
187 DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS);
188 unsigned long dirty;
189};
190
191extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
192 const struct vmw_ctx_bindinfo *ci,
193 u32 shader_slot, u32 slot);
194extern void
195vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
196 struct vmw_ctx_binding_state *from);
197extern void vmw_binding_res_list_kill(struct list_head *head);
198extern void vmw_binding_res_list_scrub(struct list_head *head);
199extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs);
200extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs);
201extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
202extern struct vmw_ctx_binding_state *
203vmw_binding_state_alloc(struct vmw_private *dev_priv);
204extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
205extern struct list_head *
206vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
207extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
208
209#endif
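/*
 * API sketch (editor's illustration, not part of this commit): a binding
 * is registered on a staging struct vmw_ctx_binding_state with
 * vmw_binding_add() and later transferred to the context's own state with
 * vmw_binding_state_commit(). The field values below are hypothetical:
 *
 *	struct vmw_ctx_bindinfo_ib binding = {
 *		.bi = { .ctx = ctx_res, .res = ib_res,
 *			.bt = vmw_ctx_binding_ib },
 *		.offset = 0,
 *		.format = SVGA3D_R16_UINT,
 *	};
 *
 *	vmw_binding_add(staged_cbs, &binding.bi, 0, 0);
 *	...
 *	vmw_binding_state_commit(ctx_cbs, staged_cbs);
 */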
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index cff2bf9db9d2..3329f623c8bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -72,6 +72,12 @@ static struct ttm_place mob_placement_flags = {
 	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
 };
 
+static struct ttm_place mob_ne_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
 struct ttm_placement vmw_vram_placement = {
 	.num_placement = 1,
 	.placement = &vram_placement_flags,
@@ -200,6 +206,13 @@ struct ttm_placement vmw_mob_placement = {
 	.busy_placement = &mob_placement_flags
 };
 
+struct ttm_placement vmw_mob_ne_placement = {
+	.num_placement = 1,
+	.num_busy_placement = 1,
+	.placement = &mob_ne_placement_flags,
+	.busy_placement = &mob_ne_placement_flags
+};
+
 struct vmw_ttm_tt {
 	struct ttm_dma_tt dma_ttm;
 	struct vmw_private *dev_priv;
@@ -804,9 +817,9 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 /**
  * vmw_move_notify - TTM move_notify_callback
  *
  * @bo: The TTM buffer object about to move.
- * @mem: The truct ttm_mem_reg indicating to what memory
+ * @mem: The struct ttm_mem_reg indicating to what memory
  * region the move is taking place.
  *
  * Calls move_notify for all subsystems needing it.
  * (currently only resources).
@@ -815,13 +828,14 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
 			    struct ttm_mem_reg *mem)
 {
 	vmw_resource_move_notify(bo, mem);
+	vmw_query_move_notify(bo, mem);
 }
 
 
 /**
  * vmw_swap_notify - TTM move_notify_callback
  *
  * @bo: The TTM buffer object about to be swapped out.
  */
 static void vmw_swap_notify(struct ttm_buffer_object *bo)
 {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
new file mode 100644
index 000000000000..5ae8f921da2a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -0,0 +1,1303 @@
1/**************************************************************************
2 *
3 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "ttm/ttm_bo_api.h"
30
31/*
32 * Size of inline command buffers. Try to make sure that a page size is a
33 * multiple of the DMA pool allocation size.
34 */
35#define VMW_CMDBUF_INLINE_ALIGN 64
36#define VMW_CMDBUF_INLINE_SIZE \
37 (1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
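/*
 * Worked example (editor's note, not part of this commit): assuming
 * sizeof(SVGACBHeader) <= 64, ALIGN(sizeof(SVGACBHeader), 64) is 64 and
 * VMW_CMDBUF_INLINE_SIZE is 1024 - 64 = 960 bytes, so each
 * struct vmw_cmdbuf_dheader below occupies exactly 1024 bytes and four of
 * them tile a 4096-byte page in the DMA pool with no slack.
 */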
38
39/**
40 * struct vmw_cmdbuf_context - Command buffer context queues
41 *
42 * @submitted: List of command buffers that have been submitted to the
43 * manager but not yet submitted to hardware.
44 * @hw_submitted: List of command buffers submitted to hardware.
45 * @preempted: List of preempted command buffers.
46 * @num_hw_submitted: Number of buffers currently being processed by hardware.
47 */
48struct vmw_cmdbuf_context {
49 struct list_head submitted;
50 struct list_head hw_submitted;
51 struct list_head preempted;
52 unsigned num_hw_submitted;
53};
54
55/**
56 * struct vmw_cmdbuf_man - Command buffer manager
57 *
58 * @cur_mutex: Mutex protecting the command buffer used for incremental small
59 * kernel command submissions, @cur.
60 * @space_mutex: Mutex to protect against starvation when we allocate
61 * main pool buffer space.
62 * @work: A struct work_struct implementing command buffer error handling.
63 * Immutable.
64 * @dev_priv: Pointer to the device private struct. Immutable.
65 * @ctx: Array of command buffer context queues. The queues and the context
66 * data is protected by @lock.
67 * @error: List of command buffers that have caused device errors.
68 * Protected by @lock.
69 * @mm: Range manager for the command buffer space. Manager allocations and
70 * frees are protected by @lock.
71 * @cmd_space: Buffer object for the command buffer space, unless we were
72 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
73 * @map_obj: Mapping state for @cmd_space. Immutable.
74 * @map: Pointer to command buffer space. May be a mapped buffer object or
75 * a contiguous coherent DMA memory allocation. Immutable.
76 * @cur: Command buffer for small kernel command submissions. Protected by
77 * the @cur_mutex.
78 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
79 * @default_size: Default size for the @cur command buffer. Immutable.
80 * @max_hw_submitted: Max number of in-flight command buffers the device can
81 * handle. Immutable.
82 * @lock: Spinlock protecting command submission queues.
83 * @header: Pool of DMA memory for device command buffer headers.
84 * Internal protection.
85 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
86 * space for inline data. Internal protection.
87 * @tasklet: Tasklet struct for irq processing. Immutable.
88 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
89 * space.
90 * @idle_queue: Wait queue for processes waiting for command buffer idle.
91 * @irq_on: Whether the process function has requested irq to be turned on.
92 * Protected by @lock.
93 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
94 * allocation. Immutable.
95 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
96 * Typically this is false only during bootstrap.
97 * @handle: DMA address handle for the command buffer space if @using_mob is
98 * false. Immutable.
99 * @size: The size of the command buffer space. Immutable.
100 */
101struct vmw_cmdbuf_man {
102 struct mutex cur_mutex;
103 struct mutex space_mutex;
104 struct work_struct work;
105 struct vmw_private *dev_priv;
106 struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
107 struct list_head error;
108 struct drm_mm mm;
109 struct ttm_buffer_object *cmd_space;
110 struct ttm_bo_kmap_obj map_obj;
111 u8 *map;
112 struct vmw_cmdbuf_header *cur;
113 size_t cur_pos;
114 size_t default_size;
115 unsigned max_hw_submitted;
116 spinlock_t lock;
117 struct dma_pool *headers;
118 struct dma_pool *dheaders;
119 struct tasklet_struct tasklet;
120 wait_queue_head_t alloc_queue;
121 wait_queue_head_t idle_queue;
122 bool irq_on;
123 bool using_mob;
124 bool has_pool;
125 dma_addr_t handle;
126 size_t size;
127};
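/*
 * Editor's note (not part of the original commit): as used by the code
 * below, the lock ordering is cur_mutex -> space_mutex -> lock, with the
 * spinlock @lock always innermost. Process context takes @lock with
 * bottom halves disabled (spin_lock_bh) since the tasklet takes it too.
 */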
128
129/**
130 * struct vmw_cmdbuf_header - Command buffer metadata
131 *
132 * @man: The command buffer manager.
133 * @cb_header: Device command buffer header, allocated from a DMA pool.
134 * @cb_context: The device command buffer context.
135 * @list: List head for attaching to the manager lists.
136 * @node: The range manager node.
137 * @handle: The DMA address of @cb_header. Handed to the device on command
138 * buffer submission.
139 * @cmd: Pointer to the command buffer space of this buffer.
140 * @size: Size of the command buffer space of this buffer.
141 * @reserved: Reserved space of this buffer.
142 * @inline_space: Whether inline command buffer space is used.
143 */
144struct vmw_cmdbuf_header {
145 struct vmw_cmdbuf_man *man;
146 SVGACBHeader *cb_header;
147 SVGACBContext cb_context;
148 struct list_head list;
149 struct drm_mm_node node;
150 dma_addr_t handle;
151 u8 *cmd;
152 size_t size;
153 size_t reserved;
154 bool inline_space;
155};
156
157/**
158 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
159 * command buffer space.
160 *
161 * @cb_header: Device command buffer header.
162 * @cmd: Inline command buffer space.
163 */
164struct vmw_cmdbuf_dheader {
165 SVGACBHeader cb_header;
166 u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
167};
168
169/**
170 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
171 *
172 * @page_size: Size of requested command buffer space in pages.
173 * @node: Pointer to the range manager node.
174 * @done: True if this allocation has succeeded.
175 */
176struct vmw_cmdbuf_alloc_info {
177 size_t page_size;
178 struct drm_mm_node *node;
179 bool done;
180};
181
182/* Loop over each context in the command buffer manager. */
183#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
184 for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
185 ++(_i), ++(_ctx))
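/*
 * Usage sketch (editor's illustration): iterating over all contexts, as
 * done in vmw_cmdbuf_man_process() and vmw_cmdbuf_man_create() below:
 *
 *	int i;
 *	struct vmw_cmdbuf_context *ctx;
 *
 *	for_each_cmdbuf_ctx(man, i, ctx)
 *		vmw_cmdbuf_ctx_init(ctx);
 */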
186
187static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
188
189
190/**
191 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
192 *
193 * @man: The range manager.
194 * @interruptible: Whether to wait interruptible when locking.
195 */
196static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
197{
198 if (interruptible) {
199 if (mutex_lock_interruptible(&man->cur_mutex))
200 return -ERESTARTSYS;
201 } else {
202 mutex_lock(&man->cur_mutex);
203 }
204
205 return 0;
206}
207
208/**
209 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
210 *
211 * @man: The range manager.
212 */
213static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
214{
215 mutex_unlock(&man->cur_mutex);
216}
217
218/**
219 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
220 * been set up with inline command buffer space.
221 * Need not be called with any locks held.
222 *
223 * @header: Pointer to the header to free.
224 */
225static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
226{
227 struct vmw_cmdbuf_dheader *dheader;
228
229 if (WARN_ON_ONCE(!header->inline_space))
230 return;
231
232 dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
233 cb_header);
234 dma_pool_free(header->man->dheaders, dheader, header->handle);
235 kfree(header);
236}
237
238/**
239 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
240 * associated structures.
241 *
242 * @header: Pointer to the header to free.
243 *
244 * For internal use. Must be called with man::lock held.
245 */
246static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
247{
248 struct vmw_cmdbuf_man *man = header->man;
249
250 BUG_ON(!spin_is_locked(&man->lock));
251
252 if (header->inline_space) {
253 vmw_cmdbuf_header_inline_free(header);
254 return;
255 }
256
257 drm_mm_remove_node(&header->node);
258 wake_up_all(&man->alloc_queue);
259 if (header->cb_header)
260 dma_pool_free(man->headers, header->cb_header,
261 header->handle);
262 kfree(header);
263}
264
265/**
266 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
267 * associated structures.
268 *
269 * @header: Pointer to the header to free.
270 */
271void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
272{
273 struct vmw_cmdbuf_man *man = header->man;
274
275 /* Avoid locking if inline_space */
276 if (header->inline_space) {
277 vmw_cmdbuf_header_inline_free(header);
278 return;
279 }
280 spin_lock_bh(&man->lock);
281 __vmw_cmdbuf_header_free(header);
282 spin_unlock_bh(&man->lock);
283}
284
285
286/**
287 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
288 *
289 * @header: The header of the buffer to submit.
290 */
291static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
292{
293 struct vmw_cmdbuf_man *man = header->man;
294 u32 val;
295
296 if (sizeof(header->handle) > 4)
297 val = (header->handle >> 32);
298 else
299 val = 0;
300 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
301
302 val = (header->handle & 0xFFFFFFFFULL);
303 val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
304 vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
305
306 return header->cb_header->status;
307}
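/*
 * Worked example (editor's note, not part of this commit): for a
 * hypothetical header handle of 0x123459000, SVGA_REG_COMMAND_HIGH is
 * written with 0x1 and SVGA_REG_COMMAND_LOW with 0x23459000 | cb_context.
 * Packing the context id into the low bits is safe because the headers
 * come from a dma_pool with 64-byte alignment, so the address bits
 * covered by SVGA_CB_CONTEXT_MASK are zero.
 */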
308
309/**
310 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
311 *
312 * @ctx: The command buffer context to initialize
313 */
314static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
315{
316 INIT_LIST_HEAD(&ctx->hw_submitted);
317 INIT_LIST_HEAD(&ctx->submitted);
318 INIT_LIST_HEAD(&ctx->preempted);
319 ctx->num_hw_submitted = 0;
320}
321
322/**
323 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
324 * context.
325 *
326 * @man: The command buffer manager.
327 * @ctx: The command buffer context.
328 *
329 * Submits command buffers to hardware until there are no more command
330 * buffers to submit or the hardware can't handle more command buffers.
331 */
332static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
333 struct vmw_cmdbuf_context *ctx)
334{
335 while (ctx->num_hw_submitted < man->max_hw_submitted &&
336 !list_empty(&ctx->submitted)) {
337 struct vmw_cmdbuf_header *entry;
338 SVGACBStatus status;
339
340 entry = list_first_entry(&ctx->submitted,
341 struct vmw_cmdbuf_header,
342 list);
343
344 status = vmw_cmdbuf_header_submit(entry);
345
346 /* This should never happen */
347 if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
348 entry->cb_header->status = SVGA_CB_STATUS_NONE;
349 break;
350 }
351
352 list_del(&entry->list);
353 list_add_tail(&entry->list, &ctx->hw_submitted);
354 ctx->num_hw_submitted++;
355 }
356
357}
358
359/**
360 * vmw_cmdbuf_ctx_process: Process a command buffer context.
361 *
362 * @man: The command buffer manager.
363 * @ctx: The command buffer context.
364 *
365 * Submit command buffers to hardware if possible, and process finished
366 * buffers: typically freeing them, but taking appropriate action on
367 * preemption or error. Wake up waiters if appropriate.
368 */
369static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
370 struct vmw_cmdbuf_context *ctx,
371 int *notempty)
372{
373 struct vmw_cmdbuf_header *entry, *next;
374
375 vmw_cmdbuf_ctx_submit(man, ctx);
376
377 list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
378 SVGACBStatus status = entry->cb_header->status;
379
380 if (status == SVGA_CB_STATUS_NONE)
381 break;
382
383 list_del(&entry->list);
384 wake_up_all(&man->idle_queue);
385 ctx->num_hw_submitted--;
386 switch (status) {
387 case SVGA_CB_STATUS_COMPLETED:
388 __vmw_cmdbuf_header_free(entry);
389 break;
390 case SVGA_CB_STATUS_COMMAND_ERROR:
391 case SVGA_CB_STATUS_CB_HEADER_ERROR:
392 list_add_tail(&entry->list, &man->error);
393 schedule_work(&man->work);
394 break;
395 case SVGA_CB_STATUS_PREEMPTED:
396 list_add(&entry->list, &ctx->preempted);
397 break;
398 default:
399 WARN_ONCE(true, "Undefined command buffer status.\n");
400 __vmw_cmdbuf_header_free(entry);
401 break;
402 }
403 }
404
405 vmw_cmdbuf_ctx_submit(man, ctx);
406 if (!list_empty(&ctx->submitted))
407 (*notempty)++;
408}
409
410/**
411 * vmw_cmdbuf_man_process - Process all command buffer contexts and
412 * switch on and off irqs as appropriate.
413 *
414 * @man: The command buffer manager.
415 *
416 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
417 * command buffers left that are not submitted to hardware, make sure
418 * IRQ handling is turned on. Otherwise, make sure it's turned off. This
419 * function may return -EAGAIN to indicate it should be rerun due to
420 * possibly missed IRQs if IRQs have just been turned on.
421 */
422static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
423{
424 int notempty = 0;
425 struct vmw_cmdbuf_context *ctx;
426 int i;
427
428 for_each_cmdbuf_ctx(man, i, ctx)
429 vmw_cmdbuf_ctx_process(man, ctx, &notempty);
430
431 if (man->irq_on && !notempty) {
432 vmw_generic_waiter_remove(man->dev_priv,
433 SVGA_IRQFLAG_COMMAND_BUFFER,
434 &man->dev_priv->cmdbuf_waiters);
435 man->irq_on = false;
436 } else if (!man->irq_on && notempty) {
437 vmw_generic_waiter_add(man->dev_priv,
438 SVGA_IRQFLAG_COMMAND_BUFFER,
439 &man->dev_priv->cmdbuf_waiters);
440 man->irq_on = true;
441
442 /* Rerun in case we just missed an irq. */
443 return -EAGAIN;
444 }
445
446 return 0;
447}
448
449/**
450 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
451 * command buffer context
452 *
453 * @man: The command buffer manager.
454 * @header: The header of the buffer to submit.
455 * @cb_context: The command buffer context to use.
456 *
457 * This function adds @header to the "submitted" queue of the command
458 * buffer context identified by @cb_context. It then calls the command buffer
459 * manager processing to potentially submit the buffer to hardware.
460 * @man->lock needs to be held when calling this function.
461 */
462static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
463 struct vmw_cmdbuf_header *header,
464 SVGACBContext cb_context)
465{
466 if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
467 header->cb_header->dxContext = 0;
468 header->cb_context = cb_context;
469 list_add_tail(&header->list, &man->ctx[cb_context].submitted);
470
471 if (vmw_cmdbuf_man_process(man) == -EAGAIN)
472 vmw_cmdbuf_man_process(man);
473}
474
475/**
476 * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
477 * handler implemented as a tasklet.
478 *
479 * @data: Tasklet closure. A pointer to the command buffer manager cast to
480 * an unsigned long.
481 *
482 * The bottom half (tasklet) of the interrupt handler simply calls into the
483 * command buffer processor to free finished buffers and submit any
484 * queued buffers to hardware.
485 */
486static void vmw_cmdbuf_man_tasklet(unsigned long data)
487{
488 struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
489
490 spin_lock(&man->lock);
491 if (vmw_cmdbuf_man_process(man) == -EAGAIN)
492 (void) vmw_cmdbuf_man_process(man);
493 spin_unlock(&man->lock);
494}
495
496/**
497 * vmw_cmdbuf_work_func - The deferred work function that handles
498 * command buffer errors.
499 *
500 * @work: The work func closure argument.
501 *
502 * Restarting the command buffer context after an error requires process
503 * context, so it is deferred to this work function.
504 */
505static void vmw_cmdbuf_work_func(struct work_struct *work)
506{
507 struct vmw_cmdbuf_man *man =
508 container_of(work, struct vmw_cmdbuf_man, work);
509 struct vmw_cmdbuf_header *entry, *next;
510 bool restart = false;
511
512 spin_lock_bh(&man->lock);
513 list_for_each_entry_safe(entry, next, &man->error, list) {
514 restart = true;
515 DRM_ERROR("Command buffer error.\n");
516
517 list_del(&entry->list);
518 __vmw_cmdbuf_header_free(entry);
519 wake_up_all(&man->idle_queue);
520 }
521 spin_unlock_bh(&man->lock);
522
523 if (restart && vmw_cmdbuf_startstop(man, true))
524 DRM_ERROR("Failed restarting command buffer context 0.\n");
525
526}
527
528/**
529 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
530 *
531 * @man: The command buffer manager.
532 * @check_preempted: Check also the preempted queue for pending command buffers.
533 *
534 */
535static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
536 bool check_preempted)
537{
538 struct vmw_cmdbuf_context *ctx;
539 bool idle = false;
540 int i;
541
542 spin_lock_bh(&man->lock);
543 vmw_cmdbuf_man_process(man);
544 for_each_cmdbuf_ctx(man, i, ctx) {
545 if (!list_empty(&ctx->submitted) ||
546 !list_empty(&ctx->hw_submitted) ||
547 (check_preempted && !list_empty(&ctx->preempted)))
548 goto out_unlock;
549 }
550
551 idle = list_empty(&man->error);
552
553out_unlock:
554 spin_unlock_bh(&man->lock);
555
556 return idle;
557}
558
559/**
560 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
561 * command submissions
562 *
563 * @man: The command buffer manager.
564 *
565 * Flushes the current command buffer without allocating a new one. A new one
566 * is automatically allocated when needed. Call with @man->cur_mutex held.
567 */
568static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
569{
570 struct vmw_cmdbuf_header *cur = man->cur;
571
572 WARN_ON(!mutex_is_locked(&man->cur_mutex));
573
574 if (!cur)
575 return;
576
577 spin_lock_bh(&man->lock);
578 if (man->cur_pos == 0) {
579 __vmw_cmdbuf_header_free(cur);
580 goto out_unlock;
581 }
582
583 man->cur->cb_header->length = man->cur_pos;
584 vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
585out_unlock:
586 spin_unlock_bh(&man->lock);
587 man->cur = NULL;
588 man->cur_pos = 0;
589}
590
591/**
592 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
593 * command submissions
594 *
595 * @man: The command buffer manager.
596 * @interruptible: Whether to sleep interruptible when sleeping.
597 *
598 * Flushes the current command buffer without allocating a new one. A new one
599 * is automatically allocated when needed.
600 */
601int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
602 bool interruptible)
603{
604 int ret = vmw_cmdbuf_cur_lock(man, interruptible);
605
606 if (ret)
607 return ret;
608
609 __vmw_cmdbuf_cur_flush(man);
610 vmw_cmdbuf_cur_unlock(man);
611
612 return 0;
613}
614
615/**
616 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
617 *
618 * @man: The command buffer manager.
619 * @interruptible: Sleep interruptible while waiting.
620 * @timeout: Time out after this many ticks.
621 *
622 * Wait until the command buffer manager has processed all command buffers,
623 * or until a timeout occurs. If a timeout occurs, the function will return
624 * -EBUSY.
625 */
626int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
627 unsigned long timeout)
628{
629 int ret;
630
631 ret = vmw_cmdbuf_cur_flush(man, interruptible);
632 vmw_generic_waiter_add(man->dev_priv,
633 SVGA_IRQFLAG_COMMAND_BUFFER,
634 &man->dev_priv->cmdbuf_waiters);
635
636 if (interruptible) {
637 ret = wait_event_interruptible_timeout
638 (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
639 timeout);
640 } else {
641 ret = wait_event_timeout
642 (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
643 timeout);
644 }
645 vmw_generic_waiter_remove(man->dev_priv,
646 SVGA_IRQFLAG_COMMAND_BUFFER,
647 &man->dev_priv->cmdbuf_waiters);
648 if (ret == 0) {
649 if (!vmw_cmdbuf_man_idle(man, true))
650 ret = -EBUSY;
651 else
652 ret = 0;
653 }
654 if (ret > 0)
655 ret = 0;
656
657 return ret;
658}
659
660/**
661 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
662 *
663 * @man: The command buffer manager.
664 * @info: Allocation info. Will hold the size on entry and allocated mm node
665 * on successful return.
666 *
667 * Try to allocate buffer space from the main pool. Returns true if the
668 * allocation succeeded, in which case @info->node holds the allocated range.
669 */
670static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
671 struct vmw_cmdbuf_alloc_info *info)
672{
673 int ret;
674
675 if (info->done)
676 return true;
677
678 memset(info->node, 0, sizeof(*info->node));
679 spin_lock_bh(&man->lock);
680 ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
681 0, 0,
682 DRM_MM_SEARCH_DEFAULT,
683 DRM_MM_CREATE_DEFAULT);
684 spin_unlock_bh(&man->lock);
685 info->done = !ret;
686
687 return info->done;
688}
689
690/**
691 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
692 *
693 * @man: The command buffer manager.
694 * @node: Pointer to pre-allocated range-manager node.
695 * @size: The size of the allocation.
696 * @interruptible: Whether to sleep interruptible while waiting for space.
697 *
698 * This function allocates buffer space from the main pool, and if there is
699 * no space available at the moment, it turns on IRQ handling and sleeps
700 * waiting for space to become available.
701 */
702static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
703 struct drm_mm_node *node,
704 size_t size,
705 bool interruptible)
706{
707 struct vmw_cmdbuf_alloc_info info;
708
709 info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
710 info.node = node;
711 info.done = false;
712
713 /*
714 * To prevent starvation of large requests, only one allocating call
715 * at a time waiting for space.
716 */
717 if (interruptible) {
718 if (mutex_lock_interruptible(&man->space_mutex))
719 return -ERESTARTSYS;
720 } else {
721 mutex_lock(&man->space_mutex);
722 }
723
724 /* Try to allocate space without waiting. */
725 if (vmw_cmdbuf_try_alloc(man, &info))
726 goto out_unlock;
727
728 vmw_generic_waiter_add(man->dev_priv,
729 SVGA_IRQFLAG_COMMAND_BUFFER,
730 &man->dev_priv->cmdbuf_waiters);
731
732 if (interruptible) {
733 int ret;
734
735 ret = wait_event_interruptible
736 (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
737 if (ret) {
738 vmw_generic_waiter_remove
739 (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
740 &man->dev_priv->cmdbuf_waiters);
741 mutex_unlock(&man->space_mutex);
742 return ret;
743 }
744 } else {
745 wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
746 }
747 vmw_generic_waiter_remove(man->dev_priv,
748 SVGA_IRQFLAG_COMMAND_BUFFER,
749 &man->dev_priv->cmdbuf_waiters);
750
751out_unlock:
752 mutex_unlock(&man->space_mutex);
753
754 return 0;
755}
756
757/**
758 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
759 * space from the main pool.
760 *
761 * @man: The command buffer manager.
762 * @header: Pointer to the header to set up.
763 * @size: The requested size of the buffer space.
764 * @interruptible: Whether to sleep interruptible while waiting for space.
765 */
766static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
767 struct vmw_cmdbuf_header *header,
768 size_t size,
769 bool interruptible)
770{
771 SVGACBHeader *cb_hdr;
772 size_t offset;
773 int ret;
774
775 if (!man->has_pool)
776 return -ENOMEM;
777
778 ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
779
780 if (ret)
781 return ret;
782
783 header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
784 &header->handle);
785 if (!header->cb_header) {
786 ret = -ENOMEM;
787 goto out_no_cb_header;
788 }
789
790 header->size = header->node.size << PAGE_SHIFT;
791 cb_hdr = header->cb_header;
792 offset = header->node.start << PAGE_SHIFT;
793 header->cmd = man->map + offset;
794 memset(cb_hdr, 0, sizeof(*cb_hdr));
795 if (man->using_mob) {
796 cb_hdr->flags = SVGA_CB_FLAG_MOB;
797 cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
798 cb_hdr->ptr.mob.mobOffset = offset;
799 } else {
800 cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
801 }
802
803 return 0;
804
805out_no_cb_header:
806 spin_lock_bh(&man->lock);
807 drm_mm_remove_node(&header->node);
808 spin_unlock_bh(&man->lock);
809
810 return ret;
811}
812
813/**
814 * vmw_cmdbuf_space_inline - Set up a command buffer header with
815 * inline command buffer space.
816 *
817 * @man: The command buffer manager.
818 * @header: Pointer to the header to set up.
819 * @size: The requested size of the buffer space.
820 */
821static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
822 struct vmw_cmdbuf_header *header,
823 int size)
824{
825 struct vmw_cmdbuf_dheader *dheader;
826 SVGACBHeader *cb_hdr;
827
828 if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
829 return -ENOMEM;
830
831 dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
832 &header->handle);
833 if (!dheader)
834 return -ENOMEM;
835
836 header->inline_space = true;
837 header->size = VMW_CMDBUF_INLINE_SIZE;
838 cb_hdr = &dheader->cb_header;
839 header->cb_header = cb_hdr;
840 header->cmd = dheader->cmd;
841 memset(dheader, 0, sizeof(*dheader));
842 cb_hdr->status = SVGA_CB_STATUS_NONE;
843 cb_hdr->flags = SVGA_CB_FLAG_NONE;
844 cb_hdr->ptr.pa = (u64)header->handle +
845 (u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
846
847 return 0;
848}
849
850/**
851 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
852 * command buffer space.
853 *
854 * @man: The command buffer manager.
855 * @size: The requested size of the buffer space.
856 * @interruptible: Whether to sleep interruptible while waiting for space.
857 * @p_header: points to a header pointer to populate on successful return.
858 *
859 * Returns a pointer to command buffer space if successful. Otherwise
860 * returns an error pointer. The header pointer returned in @p_header should
861 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
862 */
863void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
864 size_t size, bool interruptible,
865 struct vmw_cmdbuf_header **p_header)
866{
867 struct vmw_cmdbuf_header *header;
868 int ret = 0;
869
870 *p_header = NULL;
871
872 header = kzalloc(sizeof(*header), GFP_KERNEL);
873 if (!header)
874 return ERR_PTR(-ENOMEM);
875
876 if (size <= VMW_CMDBUF_INLINE_SIZE)
877 ret = vmw_cmdbuf_space_inline(man, header, size);
878 else
879 ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
880
881 if (ret) {
882 kfree(header);
883 return ERR_PTR(ret);
884 }
885
886 header->man = man;
887 INIT_LIST_HEAD(&header->list);
888 header->cb_header->status = SVGA_CB_STATUS_NONE;
889 *p_header = header;
890
891 return header->cmd;
892}
893
894/**
895 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
896 * command buffer.
897 *
898 * @man: The command buffer manager.
899 * @size: The requested size of the commands.
900 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
901 * @interruptible: Whether to sleep interruptible while waiting for space.
902 *
903 * Returns a pointer to command buffer space if successful. Otherwise
904 * returns an error pointer.
905 */
906static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
907 size_t size,
908 int ctx_id,
909 bool interruptible)
910{
911 struct vmw_cmdbuf_header *cur;
912 void *ret;
913
914 if (vmw_cmdbuf_cur_lock(man, interruptible))
915 return ERR_PTR(-ERESTARTSYS);
916
917 cur = man->cur;
918 if (cur && (size + man->cur_pos > cur->size ||
919 ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
920 ctx_id != cur->cb_header->dxContext)))
921 __vmw_cmdbuf_cur_flush(man);
922
923 if (!man->cur) {
924 ret = vmw_cmdbuf_alloc(man,
925 max_t(size_t, size, man->default_size),
926 interruptible, &man->cur);
927 if (IS_ERR(ret)) {
928 vmw_cmdbuf_cur_unlock(man);
929 return ret;
930 }
931
932 cur = man->cur;
933 }
934
935 if (ctx_id != SVGA3D_INVALID_ID) {
936 cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
937 cur->cb_header->dxContext = ctx_id;
938 }
939
940 cur->reserved = size;
941
942 return (void *) (man->cur->cmd + man->cur_pos);
943}
944
945/**
946 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
947 *
948 * @man: The command buffer manager.
949 * @size: The size of the commands actually written.
950 * @flush: Whether to flush the command buffer immediately.
951 */
952static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
953 size_t size, bool flush)
954{
955 struct vmw_cmdbuf_header *cur = man->cur;
956
957 WARN_ON(!mutex_is_locked(&man->cur_mutex));
958
959 WARN_ON(size > cur->reserved);
960 man->cur_pos += size;
961 if (!size)
962 cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
963 if (flush)
964 __vmw_cmdbuf_cur_flush(man);
965 vmw_cmdbuf_cur_unlock(man);
966}
967
968/**
969 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
970 *
971 * @man: The command buffer manager.
972 * @size: The requested size of the commands.
973 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
974 * @interruptible: Whether to sleep interruptible while waiting for space.
975 * @header: Header of the command buffer. NULL if the current command buffer
976 * should be used.
977 *
978 * Returns a pointer to command buffer space if successful. Otherwise
979 * returns an error pointer.
980 */
981void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
982 int ctx_id, bool interruptible,
983 struct vmw_cmdbuf_header *header)
984{
985 if (!header)
986 return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
987
988 if (size > header->size)
989 return ERR_PTR(-EINVAL);
990
991 if (ctx_id != SVGA3D_INVALID_ID) {
992 header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
993 header->cb_header->dxContext = ctx_id;
994 }
995
996 header->reserved = size;
997 return header->cmd;
998}
999
1000/**
1001 * vmw_cmdbuf_commit - Commit commands in a command buffer.
1002 *
1003 * @man: The command buffer manager.
1004 * @size: The size of the commands actually written.
1005 * @header: Header of the command buffer. NULL if the current command buffer
1006 * should be used.
1007 * @flush: Whether to flush the command buffer immediately.
1008 */
1009void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1010 struct vmw_cmdbuf_header *header, bool flush)
1011{
1012 if (!header) {
1013 vmw_cmdbuf_commit_cur(man, size, flush);
1014 return;
1015 }
1016
1017 (void) vmw_cmdbuf_cur_lock(man, false);
1018 __vmw_cmdbuf_cur_flush(man);
1019 WARN_ON(size > header->reserved);
1020 man->cur = header;
1021 man->cur_pos = size;
1022 if (!size)
1023 header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1024 if (flush)
1025 __vmw_cmdbuf_cur_flush(man);
1026 vmw_cmdbuf_cur_unlock(man);
1027}
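/*
 * Usage sketch (editor's illustration, not part of this commit): pairing
 * vmw_cmdbuf_reserve() and vmw_cmdbuf_commit() for a small kernel command
 * submitted through the current command buffer. The command chosen here
 * (a surface update) is illustrative only.
 */
static int example_cmdbuf_submit(struct vmw_cmdbuf_man *man, u32 sid)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = vmw_cmdbuf_reserve(man, sizeof(*cmd), SVGA3D_INVALID_ID,
				 true, NULL);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	cmd->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = sid;

	/* Commit to the current buffer; flushed when full or on demand. */
	vmw_cmdbuf_commit(man, sizeof(*cmd), NULL, false);

	return 0;
}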
1028
1029/**
1030 * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
1031 *
1032 * @man: The command buffer manager.
1033 */
1034void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
1035{
1036 if (!man)
1037 return;
1038
1039 tasklet_schedule(&man->tasklet);
1040}
1041
1042/**
1043 * vmw_cmdbuf_send_device_command - Send a command through the device context.
1044 *
1045 * @man: The command buffer manager.
1046 * @command: Pointer to the command to send.
1047 * @size: Size of the command.
1048 *
1049 * Synchronously sends a device context command.
1050 */
1051static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
1052 const void *command,
1053 size_t size)
1054{
1055 struct vmw_cmdbuf_header *header;
1056 int status;
1057 void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
1058
1059 if (IS_ERR(cmd))
1060 return PTR_ERR(cmd);
1061
1062 memcpy(cmd, command, size);
1063 header->cb_header->length = size;
1064 header->cb_context = SVGA_CB_CONTEXT_DEVICE;
1065 spin_lock_bh(&man->lock);
1066 status = vmw_cmdbuf_header_submit(header);
1067 spin_unlock_bh(&man->lock);
1068 vmw_cmdbuf_header_free(header);
1069
1070 if (status != SVGA_CB_STATUS_COMPLETED) {
1071 DRM_ERROR("Device context command failed with status %d\n",
1072 status);
1073 return -EINVAL;
1074 }
1075
1076 return 0;
1077}
1078
1079/**
1080 * vmw_cmdbuf_startstop - Send a start / stop command through the device
1081 * context.
1082 *
1083 * @man: The command buffer manager.
1084 * @enable: Whether to enable or disable the context.
1085 *
1086 * Synchronously sends a device start / stop context command.
1087 */
1088static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
1089 bool enable)
1090{
1091 struct {
1092 uint32 id;
1093 SVGADCCmdStartStop body;
1094 } __packed cmd;
1095
1096 cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
1097 cmd.body.enable = (enable) ? 1 : 0;
1098 cmd.body.context = SVGA_CB_CONTEXT_0;
1099
1100 return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1101}
1102
1103/**
1104 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
1105 *
1106 * @man: The command buffer manager.
1107 * @size: The size of the main space pool.
1108 * @default_size: The default size of the command buffer for small kernel
1109 * submissions.
1110 *
1111 * Set the size of, and allocate, the main command buffer space pool, and
1112 * set the default size of the command buffer for
1113 * small kernel submissions. If successful, this enables large command
1114 * submissions. Note that this function requires that rudimentary command
1115 * submission is already available and that the MOB memory manager is alive.
1116 * Returns 0 on success. Negative error code on failure.
1117 */
1118int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
1119 size_t size, size_t default_size)
1120{
1121 struct vmw_private *dev_priv = man->dev_priv;
1122 bool dummy;
1123 int ret;
1124
1125 if (man->has_pool)
1126 return -EINVAL;
1127
1128 /* First, try to allocate a huge chunk of DMA memory */
1129 size = PAGE_ALIGN(size);
1130 man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
1131 &man->handle, GFP_KERNEL);
1132 if (man->map) {
1133 man->using_mob = false;
1134 } else {
1135 /*
1136 * DMA memory failed. If we can have command buffers in a
1137 * MOB, try to use that instead. Note that this will
1138 * actually call into the already enabled manager, when
1139 * binding the MOB.
1140 */
1141 if (!(dev_priv->capabilities & SVGA_CAP_DX))
1142 return -ENOMEM;
1143
1144 ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
1145 &vmw_mob_ne_placement, 0, false, NULL,
1146 &man->cmd_space);
1147 if (ret)
1148 return ret;
1149
1150 man->using_mob = true;
1151 ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
1152 &man->map_obj);
1153 if (ret)
1154 goto out_no_map;
1155
1156 man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
1157 }
1158
1159 man->size = size;
1160 drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
1161
1162 man->has_pool = true;
1163 man->default_size = default_size;
1164 DRM_INFO("Using command buffers with %s pool.\n",
1165 (man->using_mob) ? "MOB" : "DMA");
1166
1167 return 0;
1168
1169out_no_map:
1170 if (man->using_mob)
1171 ttm_bo_unref(&man->cmd_space);
1172
1173 return ret;
1174}
1175
1176/**
1177 * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
1178 * inline command buffer submissions only.
1179 *
1180 * @dev_priv: Pointer to device private structure.
1181 *
1182 * Returns a pointer to a command buffer manager on success, or an error
1183 * pointer on failure. The command buffer manager will be enabled for
1184 * submissions of size VMW_CMDBUF_INLINE_SIZE only.
1185 */
1186struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
1187{
1188 struct vmw_cmdbuf_man *man;
1189 struct vmw_cmdbuf_context *ctx;
1190 int i;
1191 int ret;
1192
1193 if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
1194 return ERR_PTR(-ENOSYS);
1195
1196 man = kzalloc(sizeof(*man), GFP_KERNEL);
1197 if (!man)
1198 return ERR_PTR(-ENOMEM);
1199
1200 man->headers = dma_pool_create("vmwgfx cmdbuf",
1201 &dev_priv->dev->pdev->dev,
1202 sizeof(SVGACBHeader),
1203 64, PAGE_SIZE);
1204 if (!man->headers) {
1205 ret = -ENOMEM;
1206 goto out_no_pool;
1207 }
1208
1209 man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
1210 &dev_priv->dev->pdev->dev,
1211 sizeof(struct vmw_cmdbuf_dheader),
1212 64, PAGE_SIZE);
1213 if (!man->dheaders) {
1214 ret = -ENOMEM;
1215 goto out_no_dpool;
1216 }
1217
1218 for_each_cmdbuf_ctx(man, i, ctx)
1219 vmw_cmdbuf_ctx_init(ctx);
1220
1221 INIT_LIST_HEAD(&man->error);
1222 spin_lock_init(&man->lock);
1223 mutex_init(&man->cur_mutex);
1224 mutex_init(&man->space_mutex);
1225 tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
1226 (unsigned long) man);
1227 man->default_size = VMW_CMDBUF_INLINE_SIZE;
1228 init_waitqueue_head(&man->alloc_queue);
1229 init_waitqueue_head(&man->idle_queue);
1230 man->dev_priv = dev_priv;
1231 man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
1232 INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
1233 vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
1234 &dev_priv->error_waiters);
1235 ret = vmw_cmdbuf_startstop(man, true);
1236 if (ret) {
1237 DRM_ERROR("Failed starting command buffer context 0.\n");
1238 vmw_cmdbuf_man_destroy(man);
1239 return ERR_PTR(ret);
1240 }
1241
1242 return man;
1243
1244out_no_dpool:
1245 dma_pool_destroy(man->headers);
1246out_no_pool:
1247 kfree(man);
1248
1249 return ERR_PTR(ret);
1250}
1251
1252/**
1253 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
1254 *
1255 * @man: Pointer to a command buffer manager.
1256 *
1257 * This function removes the main buffer space pool, and should be called
1258 * before MOB memory management is removed. When this function has been called,
1259 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
1260 * less are allowed, and the default size of the command buffer for small kernel
1261 * submissions is also set to this size.
1262 */
1263void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
1264{
1265 if (!man->has_pool)
1266 return;
1267
1268 man->has_pool = false;
1269 man->default_size = VMW_CMDBUF_INLINE_SIZE;
1270 (void) vmw_cmdbuf_idle(man, false, 10*HZ);
1271 if (man->using_mob) {
1272 (void) ttm_bo_kunmap(&man->map_obj);
1273 ttm_bo_unref(&man->cmd_space);
1274 } else {
1275 dma_free_coherent(&man->dev_priv->dev->pdev->dev,
1276 man->size, man->map, man->handle);
1277 }
1278}
1279
1280/**
1281 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
1282 *
1283 * @man: Pointer to a command buffer manager.
1284 *
1285 * This function idles and then destroys a command buffer manager.
1286 */
1287void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
1288{
1289 WARN_ON_ONCE(man->has_pool);
1290 (void) vmw_cmdbuf_idle(man, false, 10*HZ);
1291 if (vmw_cmdbuf_startstop(man, false))
1292 DRM_ERROR("Failed stopping command buffer context 0.\n");
1293
1294 vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
1295 &man->dev_priv->error_waiters);
1296 tasklet_kill(&man->tasklet);
1297 (void) cancel_work_sync(&man->work);
1298 dma_pool_destroy(man->dheaders);
1299 dma_pool_destroy(man->headers);
1300 mutex_destroy(&man->cur_mutex);
1301 mutex_destroy(&man->space_mutex);
1302 kfree(man);
1303}
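/*
 * Lifecycle sketch (editor's illustration, not part of this commit):
 * typical bring-up and tear-down order for the command buffer manager,
 * based on the constraints documented above. Sizes are hypothetical.
 */
static int example_cmdbuf_lifecycle(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	int ret;

	/* Enabled for inline (small) submissions only at this point. */
	man = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(man))
		return PTR_ERR(man);

	/* Needs MOB memory management alive; 2 MiB pool, 8 KiB default. */
	ret = vmw_cmdbuf_set_pool_size(man, 2 << 20, 8 << 10);
	if (ret)
		goto out_destroy;

	/* ... reserve / commit command buffers ... */

	/* The pool must go away before MOB memory management does. */
	vmw_cmdbuf_remove_pool(man);
out_destroy:
	vmw_cmdbuf_man_destroy(man);
	return ret;
}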
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 21e9b7f8dad0..13db8a2851ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,15 +26,10 @@
  **************************************************************************/
 
 #include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
 
 #define VMW_CMDBUF_RES_MAN_HT_ORDER 12
 
-enum vmw_cmdbuf_res_state {
-	VMW_CMDBUF_RES_COMMITED,
-	VMW_CMDBUF_RES_ADD,
-	VMW_CMDBUF_RES_DEL
-};
-
 /**
  * struct vmw_cmdbuf_res - Command buffer managed resource entry.
  *
@@ -132,9 +127,12 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
 
 	list_for_each_entry_safe(entry, next, list, head) {
 		list_del(&entry->head);
+		if (entry->res->func->commit_notify)
+			entry->res->func->commit_notify(entry->res,
+							entry->state);
 		switch (entry->state) {
 		case VMW_CMDBUF_RES_ADD:
-			entry->state = VMW_CMDBUF_RES_COMMITED;
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
 			list_add_tail(&entry->head, &entry->man->list);
 			break;
 		case VMW_CMDBUF_RES_DEL:
@@ -175,7 +173,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
 						 &entry->hash);
 			list_del(&entry->head);
 			list_add_tail(&entry->head, &entry->man->list);
-			entry->state = VMW_CMDBUF_RES_COMMITED;
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
 			break;
 		default:
 			BUG();
@@ -231,6 +229,9 @@ out_invalid_key:
  * @res_type: The resource type.
  * @user_key: The user-space id of the resource.
  * @list: The staging list.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The pointer will be
+ * non ref-counted.
  *
  * This function looks up the struct vmw_cmdbuf_res entry from the manager
  * hash table and, if it exists, removes it. Depending on its current staging
@@ -240,7 +241,8 @@ out_invalid_key:
 int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
 			  enum vmw_cmdbuf_res_type res_type,
 			  u32 user_key,
-			  struct list_head *list)
+			  struct list_head *list,
+			  struct vmw_resource **res_p)
 {
 	struct vmw_cmdbuf_res *entry;
 	struct drm_hash_item *hash;
@@ -256,12 +258,14 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
 	switch (entry->state) {
 	case VMW_CMDBUF_RES_ADD:
 		vmw_cmdbuf_res_free(man, entry);
+		*res_p = NULL;
 		break;
-	case VMW_CMDBUF_RES_COMMITED:
+	case VMW_CMDBUF_RES_COMMITTED:
 		(void) drm_ht_remove_item(&man->resources, &entry->hash);
 		list_del(&entry->head);
 		entry->state = VMW_CMDBUF_RES_DEL;
 		list_add_tail(&entry->head, list);
+		*res_p = entry->res;
 		break;
 	default:
 		BUG();
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 5ac92874404d..443d1ed00de7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,19 +27,19 @@
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 #include "ttm/ttm_placement.h"
 
 struct vmw_user_context {
 	struct ttm_base_object base;
 	struct vmw_resource res;
-	struct vmw_ctx_binding_state cbs;
+	struct vmw_ctx_binding_state *cbs;
 	struct vmw_cmdbuf_res_manager *man;
+	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+	spinlock_t cotable_lock;
+	struct vmw_dma_buffer *dx_query_mob;
 };
 
-
-
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
-
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_context_base_to_res(struct ttm_base_object *base);
@@ -51,12 +51,14 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 				 bool readback,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
-					   bool rebind);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+static int vmw_dx_context_create(struct vmw_resource *res);
+static int vmw_dx_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_destroy(struct vmw_resource *res);
+
 static uint64_t vmw_user_context_size;
 
 static const struct vmw_user_resource_conv user_context_conv = {
@@ -93,15 +95,38 @@ static const struct vmw_res_func vmw_gb_context_func = {
 	.unbind = vmw_gb_context_unbind
 };
 
-static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
-	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
-	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
-	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+static const struct vmw_res_func vmw_dx_context_func = {
+	.res_type = vmw_res_dx_context,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "dx contexts",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_dx_context_create,
+	.destroy = vmw_dx_context_destroy,
+	.bind = vmw_dx_context_bind,
+	.unbind = vmw_dx_context_unbind
+};
 
 /**
  * Context management:
  */
 
+static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
115{
116 struct vmw_resource *res;
117 int i;
118
119 for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
120 spin_lock(&uctx->cotable_lock);
121 res = uctx->cotables[i];
122 uctx->cotables[i] = NULL;
123 spin_unlock(&uctx->cotable_lock);
124
125 if (res)
126 vmw_resource_unreference(&res);
127 }
128}
129
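
vmw_context_cotables_unref() detaches each cotable pointer while holding the
spinlock but drops the reference only after unlocking, presumably because
vmw_resource_unreference() may sleep or take further locks, neither of which
is allowed under a spinlock. A generic userspace sketch of that
detach-then-release pattern, with a pthread mutex standing in for the
spinlock (names are illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    struct obj { int refs; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct obj *slot;

    /* May block in the real driver, so it must be called unlocked. */
    static void obj_unref(struct obj *o)
    {
            if (--o->refs == 0)
                    free(o);
    }

    static void slot_clear(void)
    {
            struct obj *o;

            pthread_mutex_lock(&lock);      /* detach under the lock... */
            o = slot;
            slot = NULL;
            pthread_mutex_unlock(&lock);

            if (o)                          /* ...release outside of it */
                    obj_unref(o);
    }

    int main(void)
    {
            slot = calloc(1, sizeof(*slot));
            slot->refs = 1;
            slot_clear();
            return 0;
    }
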
105static void vmw_hw_context_destroy(struct vmw_resource *res) 130static void vmw_hw_context_destroy(struct vmw_resource *res)
106{ 131{
107 struct vmw_user_context *uctx = 132 struct vmw_user_context *uctx =
@@ -113,17 +138,19 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
113 } *cmd; 138 } *cmd;
114 139
115 140
116 if (res->func->destroy == vmw_gb_context_destroy) { 141 if (res->func->destroy == vmw_gb_context_destroy ||
142 res->func->destroy == vmw_dx_context_destroy) {
117 mutex_lock(&dev_priv->cmdbuf_mutex); 143 mutex_lock(&dev_priv->cmdbuf_mutex);
118 vmw_cmdbuf_res_man_destroy(uctx->man); 144 vmw_cmdbuf_res_man_destroy(uctx->man);
119 mutex_lock(&dev_priv->binding_mutex); 145 mutex_lock(&dev_priv->binding_mutex);
120 (void) vmw_context_binding_state_kill(&uctx->cbs); 146 vmw_binding_state_kill(uctx->cbs);
121 (void) vmw_gb_context_destroy(res); 147 (void) res->func->destroy(res);
122 mutex_unlock(&dev_priv->binding_mutex); 148 mutex_unlock(&dev_priv->binding_mutex);
123 if (dev_priv->pinned_bo != NULL && 149 if (dev_priv->pinned_bo != NULL &&
124 !dev_priv->query_cid_valid) 150 !dev_priv->query_cid_valid)
125 __vmw_execbuf_release_pinned_bo(dev_priv, NULL); 151 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
126 mutex_unlock(&dev_priv->cmdbuf_mutex); 152 mutex_unlock(&dev_priv->cmdbuf_mutex);
153 vmw_context_cotables_unref(uctx);
127 return; 154 return;
128 } 155 }
129 156
@@ -135,43 +162,67 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
135 return; 162 return;
136 } 163 }
137 164
138 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY); 165 cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
139 cmd->header.size = cpu_to_le32(sizeof(cmd->body)); 166 cmd->header.size = sizeof(cmd->body);
140 cmd->body.cid = cpu_to_le32(res->id); 167 cmd->body.cid = res->id;
141 168
142 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 169 vmw_fifo_commit(dev_priv, sizeof(*cmd));
143 vmw_3d_resource_dec(dev_priv, false); 170 vmw_fifo_resource_dec(dev_priv);
144} 171}
145 172
146static int vmw_gb_context_init(struct vmw_private *dev_priv, 173static int vmw_gb_context_init(struct vmw_private *dev_priv,
174 bool dx,
147 struct vmw_resource *res, 175 struct vmw_resource *res,
148 void (*res_free) (struct vmw_resource *res)) 176 void (*res_free)(struct vmw_resource *res))
149{ 177{
150 int ret; 178 int ret, i;
151 struct vmw_user_context *uctx = 179 struct vmw_user_context *uctx =
152 container_of(res, struct vmw_user_context, res); 180 container_of(res, struct vmw_user_context, res);
153 181
182 res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
183 SVGA3D_CONTEXT_DATA_SIZE);
154 ret = vmw_resource_init(dev_priv, res, true, 184 ret = vmw_resource_init(dev_priv, res, true,
155 res_free, &vmw_gb_context_func); 185 res_free,
156 res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; 186 dx ? &vmw_dx_context_func :
187 &vmw_gb_context_func);
157 if (unlikely(ret != 0)) 188 if (unlikely(ret != 0))
158 goto out_err; 189 goto out_err;
159 190
160 if (dev_priv->has_mob) { 191 if (dev_priv->has_mob) {
161 uctx->man = vmw_cmdbuf_res_man_create(dev_priv); 192 uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
162 if (unlikely(IS_ERR(uctx->man))) { 193 if (IS_ERR(uctx->man)) {
163 ret = PTR_ERR(uctx->man); 194 ret = PTR_ERR(uctx->man);
164 uctx->man = NULL; 195 uctx->man = NULL;
165 goto out_err; 196 goto out_err;
166 } 197 }
167 } 198 }
168 199
169 memset(&uctx->cbs, 0, sizeof(uctx->cbs)); 200 uctx->cbs = vmw_binding_state_alloc(dev_priv);
170 INIT_LIST_HEAD(&uctx->cbs.list); 201 if (IS_ERR(uctx->cbs)) {
202 ret = PTR_ERR(uctx->cbs);
203 goto out_err;
204 }
205
206 spin_lock_init(&uctx->cotable_lock);
207
208 if (dx) {
209 for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
210 uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
211 &uctx->res, i);
212 if (unlikely(uctx->cotables[i] == NULL)) {
213 ret = -ENOMEM;
214 goto out_cotables;
215 }
216 }
217 }
218
219
171 220
172 vmw_resource_activate(res, vmw_hw_context_destroy); 221 vmw_resource_activate(res, vmw_hw_context_destroy);
173 return 0; 222 return 0;
174 223
224out_cotables:
225 vmw_context_cotables_unref(uctx);
175out_err: 226out_err:
176 if (res_free) 227 if (res_free)
177 res_free(res); 228 res_free(res);
@@ -182,7 +233,8 @@ out_err:
182 233
183static int vmw_context_init(struct vmw_private *dev_priv, 234static int vmw_context_init(struct vmw_private *dev_priv,
184 struct vmw_resource *res, 235 struct vmw_resource *res,
185 void (*res_free) (struct vmw_resource *res)) 236 void (*res_free)(struct vmw_resource *res),
237 bool dx)
186{ 238{
187 int ret; 239 int ret;
188 240
@@ -192,7 +244,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
192 } *cmd; 244 } *cmd;
193 245
194 if (dev_priv->has_mob) 246 if (dev_priv->has_mob)
195 return vmw_gb_context_init(dev_priv, res, res_free); 247 return vmw_gb_context_init(dev_priv, dx, res, res_free);
196 248
197 ret = vmw_resource_init(dev_priv, res, false, 249 ret = vmw_resource_init(dev_priv, res, false,
198 res_free, &vmw_legacy_context_func); 250 res_free, &vmw_legacy_context_func);
@@ -215,12 +267,12 @@ static int vmw_context_init(struct vmw_private *dev_priv,
215 return -ENOMEM; 267 return -ENOMEM;
216 } 268 }
217 269
218 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE); 270 cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
219 cmd->header.size = cpu_to_le32(sizeof(cmd->body)); 271 cmd->header.size = sizeof(cmd->body);
220 cmd->body.cid = cpu_to_le32(res->id); 272 cmd->body.cid = res->id;
221 273
222 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 274 vmw_fifo_commit(dev_priv, sizeof(*cmd));
223 (void) vmw_3d_resource_inc(dev_priv, false); 275 vmw_fifo_resource_inc(dev_priv);
224 vmw_resource_activate(res, vmw_hw_context_destroy); 276 vmw_resource_activate(res, vmw_hw_context_destroy);
225 return 0; 277 return 0;
226 278
@@ -232,19 +284,10 @@ out_early:
232 return ret; 284 return ret;
233} 285}
234 286
235struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
236{
237 struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
238 int ret;
239
240 if (unlikely(res == NULL))
241 return NULL;
242
243 ret = vmw_context_init(dev_priv, res, NULL);
244
245 return (ret == 0) ? res : NULL;
246}
247 287
288/*
289 * GB context.
290 */
248 291
249static int vmw_gb_context_create(struct vmw_resource *res) 292static int vmw_gb_context_create(struct vmw_resource *res)
250{ 293{
@@ -281,7 +324,7 @@ static int vmw_gb_context_create(struct vmw_resource *res)
281 cmd->header.size = sizeof(cmd->body); 324 cmd->header.size = sizeof(cmd->body);
282 cmd->body.cid = res->id; 325 cmd->body.cid = res->id;
283 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 326 vmw_fifo_commit(dev_priv, sizeof(*cmd));
284 (void) vmw_3d_resource_inc(dev_priv, false); 327 vmw_fifo_resource_inc(dev_priv);
285 328
286 return 0; 329 return 0;
287 330
@@ -309,7 +352,6 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
309 "binding.\n"); 352 "binding.\n");
310 return -ENOMEM; 353 return -ENOMEM;
311 } 354 }
312
313 cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; 355 cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
314 cmd->header.size = sizeof(cmd->body); 356 cmd->header.size = sizeof(cmd->body);
315 cmd->body.cid = res->id; 357 cmd->body.cid = res->id;
@@ -346,7 +388,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
346 BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 388 BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
347 389
348 mutex_lock(&dev_priv->binding_mutex); 390 mutex_lock(&dev_priv->binding_mutex);
349 vmw_context_binding_state_scrub(&uctx->cbs); 391 vmw_binding_state_scrub(uctx->cbs);
350 392
351 submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); 393 submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
352 394
@@ -414,7 +456,231 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
414 if (dev_priv->query_cid == res->id) 456 if (dev_priv->query_cid == res->id)
415 dev_priv->query_cid_valid = false; 457 dev_priv->query_cid_valid = false;
416 vmw_resource_release_id(res); 458 vmw_resource_release_id(res);
417 vmw_3d_resource_dec(dev_priv, false); 459 vmw_fifo_resource_dec(dev_priv);
460
461 return 0;
462}
463
464/*
465 * DX context.
466 */
467
468static int vmw_dx_context_create(struct vmw_resource *res)
469{
470 struct vmw_private *dev_priv = res->dev_priv;
471 int ret;
472 struct {
473 SVGA3dCmdHeader header;
474 SVGA3dCmdDXDefineContext body;
475 } *cmd;
476
477 if (likely(res->id != -1))
478 return 0;
479
480 ret = vmw_resource_alloc_id(res);
481 if (unlikely(ret != 0)) {
482 DRM_ERROR("Failed to allocate a context id.\n");
483 goto out_no_id;
484 }
485
486 if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
487 ret = -EBUSY;
488 goto out_no_fifo;
489 }
490
491 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
492 if (unlikely(cmd == NULL)) {
493 DRM_ERROR("Failed reserving FIFO space for context "
494 "creation.\n");
495 ret = -ENOMEM;
496 goto out_no_fifo;
497 }
498
499 cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
500 cmd->header.size = sizeof(cmd->body);
501 cmd->body.cid = res->id;
502 vmw_fifo_commit(dev_priv, sizeof(*cmd));
503 vmw_fifo_resource_inc(dev_priv);
504
505 return 0;
506
507out_no_fifo:
508 vmw_resource_release_id(res);
509out_no_id:
510 return ret;
511}
512
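
vmw_dx_context_create() acquires its resources in order (id, then FIFO space)
and unwinds in reverse through the out_no_fifo/out_no_id labels, so each
failure point releases exactly what was acquired before it. The idiom in
isolation, as a runnable userspace sketch (the allocation steps are
placeholders):

    #include <stdlib.h>

    static int create_thing(void)
    {
            char *id, *fifo;
            int ret = 0;

            id = malloc(16);                /* step 1: allocate an id */
            if (!id) {
                    ret = -12;              /* -ENOMEM */
                    goto out_no_id;
            }

            fifo = malloc(64);              /* step 2: reserve command space */
            if (!fifo) {
                    ret = -12;
                    goto out_no_fifo;
            }

            free(fifo);                     /* success path */
            free(id);
            return 0;

    out_no_fifo:
            free(id);                       /* undo step 1 only */
    out_no_id:
            return ret;
    }

    int main(void)
    {
            return create_thing() ? 1 : 0;
    }
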
513static int vmw_dx_context_bind(struct vmw_resource *res,
514 struct ttm_validate_buffer *val_buf)
515{
516 struct vmw_private *dev_priv = res->dev_priv;
517 struct {
518 SVGA3dCmdHeader header;
519 SVGA3dCmdDXBindContext body;
520 } *cmd;
521 struct ttm_buffer_object *bo = val_buf->bo;
522
523 BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
524
525 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
526 if (unlikely(cmd == NULL)) {
527 DRM_ERROR("Failed reserving FIFO space for context "
528 "binding.\n");
529 return -ENOMEM;
530 }
531
532 cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
533 cmd->header.size = sizeof(cmd->body);
534 cmd->body.cid = res->id;
535 cmd->body.mobid = bo->mem.start;
536 cmd->body.validContents = res->backup_dirty;
537 res->backup_dirty = false;
538 vmw_fifo_commit(dev_priv, sizeof(*cmd));
539
540
541 return 0;
542}
543
544/**
545 * vmw_dx_context_scrub_cotables - Scrub all bindings and
546 * cotables from a context
547 *
548 * @ctx: Pointer to the context resource
 549 * @readback: Whether to save the cotable contents on scrubbing.
550 *
551 * COtables must be unbound before their context, but unbinding requires
552 * the backup buffer being reserved, whereas scrubbing does not.
553 * This function scrubs all cotables of a context, potentially reading back
554 * the contents into their backup buffers. However, scrubbing cotables
 555 * also makes the device context invalid, so all bindings are scrubbed
 556 * first to avoid having to scrub them later against an invalid context.
557 */
558void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
559 bool readback)
560{
561 struct vmw_user_context *uctx =
562 container_of(ctx, struct vmw_user_context, res);
563 int i;
564
565 vmw_binding_state_scrub(uctx->cbs);
566 for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
567 struct vmw_resource *res;
568
569 /* Avoid racing with ongoing cotable destruction. */
570 spin_lock(&uctx->cotable_lock);
571 res = uctx->cotables[vmw_cotable_scrub_order[i]];
572 if (res)
573 res = vmw_resource_reference_unless_doomed(res);
574 spin_unlock(&uctx->cotable_lock);
575 if (!res)
576 continue;
577
578 WARN_ON(vmw_cotable_scrub(res, readback));
579 vmw_resource_unreference(&res);
580 }
581}
582
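
The lock-protected lookup above relies on
vmw_resource_reference_unless_doomed(), which by its name only takes a
reference if the refcount has not already dropped to zero (the
kref_get_unless_zero() pattern), so a cotable that is concurrently being
destroyed is skipped rather than resurrected. A userspace sketch of that
conditional reference step, assuming exactly that semantic and using C11
atomics in place of kref:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct res { atomic_int refs; };

    /* Take a reference only if the object is not already on its way out. */
    static bool ref_unless_doomed(struct res *r)
    {
            int old = atomic_load(&r->refs);

            while (old != 0) {
                    if (atomic_compare_exchange_weak(&r->refs, &old, old + 1))
                            return true;
            }
            return false;   /* doomed: the refcount already hit zero */
    }

    int main(void)
    {
            struct res alive = { 1 }, doomed = { 0 };

            printf("%d %d\n", ref_unless_doomed(&alive),
                   ref_unless_doomed(&doomed));
            return 0;
    }
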
583static int vmw_dx_context_unbind(struct vmw_resource *res,
584 bool readback,
585 struct ttm_validate_buffer *val_buf)
586{
587 struct vmw_private *dev_priv = res->dev_priv;
588 struct ttm_buffer_object *bo = val_buf->bo;
589 struct vmw_fence_obj *fence;
590 struct vmw_user_context *uctx =
591 container_of(res, struct vmw_user_context, res);
592
593 struct {
594 SVGA3dCmdHeader header;
595 SVGA3dCmdDXReadbackContext body;
596 } *cmd1;
597 struct {
598 SVGA3dCmdHeader header;
599 SVGA3dCmdDXBindContext body;
600 } *cmd2;
601 uint32_t submit_size;
602 uint8_t *cmd;
603
604
605 BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
606
607 mutex_lock(&dev_priv->binding_mutex);
608 vmw_dx_context_scrub_cotables(res, readback);
609
610 if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
611 readback) {
612 WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
613 if (vmw_query_readback_all(uctx->dx_query_mob))
614 DRM_ERROR("Failed to read back query states\n");
615 }
616
617 submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
618
619 cmd = vmw_fifo_reserve(dev_priv, submit_size);
620 if (unlikely(cmd == NULL)) {
621 DRM_ERROR("Failed reserving FIFO space for context "
622 "unbinding.\n");
623 mutex_unlock(&dev_priv->binding_mutex);
624 return -ENOMEM;
625 }
626
627 cmd2 = (void *) cmd;
628 if (readback) {
629 cmd1 = (void *) cmd;
630 cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
631 cmd1->header.size = sizeof(cmd1->body);
632 cmd1->body.cid = res->id;
633 cmd2 = (void *) (&cmd1[1]);
634 }
635 cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
636 cmd2->header.size = sizeof(cmd2->body);
637 cmd2->body.cid = res->id;
638 cmd2->body.mobid = SVGA3D_INVALID_ID;
639
640 vmw_fifo_commit(dev_priv, submit_size);
641 mutex_unlock(&dev_priv->binding_mutex);
642
643 /*
644 * Create a fence object and fence the backup buffer.
645 */
646
647 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
648 &fence, NULL);
649
650 vmw_fence_single_bo(bo, fence);
651
652 if (likely(fence != NULL))
653 vmw_fence_obj_unreference(&fence);
654
655 return 0;
656}
657
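
vmw_dx_context_unbind() reserves a single FIFO span sized for an optional
readback command followed by the mandatory bind-to-invalid command, then lays
both out back to back in that reservation. A standalone sketch of the packing
arithmetic (the command structs and ids are invented stand-ins for the SVGA
layouts):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct hdr      { uint32_t id, size; };
    struct readback { struct hdr h; uint32_t cid; };
    struct bind     { struct hdr h; uint32_t cid, mobid; };

    int main(void)
    {
            int do_readback = 1;
            size_t submit = sizeof(struct bind) +
                            (do_readback ? sizeof(struct readback) : 0);
            uint8_t *fifo = calloc(1, submit);  /* the reserved FIFO span */
            struct bind *b = (struct bind *)fifo;

            if (do_readback) {
                    struct readback *r = (struct readback *)fifo;

                    r->h.id = 1;                /* readback command first */
                    r->h.size = sizeof(*r) - sizeof(r->h);
                    b = (struct bind *)(r + 1); /* bind follows it */
            }
            b->h.id = 2;
            b->h.size = sizeof(*b) - sizeof(b->h);
            b->mobid = UINT32_MAX;              /* "invalid id" sentinel */

            printf("submitting %zu bytes\n", submit);
            free(fifo);
            return 0;
    }
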
658static int vmw_dx_context_destroy(struct vmw_resource *res)
659{
660 struct vmw_private *dev_priv = res->dev_priv;
661 struct {
662 SVGA3dCmdHeader header;
663 SVGA3dCmdDXDestroyContext body;
664 } *cmd;
665
666 if (likely(res->id == -1))
667 return 0;
668
669 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
670 if (unlikely(cmd == NULL)) {
671 DRM_ERROR("Failed reserving FIFO space for context "
672 "destruction.\n");
673 return -ENOMEM;
674 }
675
676 cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
677 cmd->header.size = sizeof(cmd->body);
678 cmd->body.cid = res->id;
679 vmw_fifo_commit(dev_priv, sizeof(*cmd));
680 if (dev_priv->query_cid == res->id)
681 dev_priv->query_cid_valid = false;
682 vmw_resource_release_id(res);
683 vmw_fifo_resource_dec(dev_priv);
418 684
419 return 0; 685 return 0;
420} 686}
@@ -435,6 +701,11 @@ static void vmw_user_context_free(struct vmw_resource *res)
435 container_of(res, struct vmw_user_context, res); 701 container_of(res, struct vmw_user_context, res);
436 struct vmw_private *dev_priv = res->dev_priv; 702 struct vmw_private *dev_priv = res->dev_priv;
437 703
704 if (ctx->cbs)
705 vmw_binding_state_free(ctx->cbs);
706
707 (void) vmw_context_bind_dx_query(res, NULL);
708
438 ttm_base_object_kfree(ctx, base); 709 ttm_base_object_kfree(ctx, base);
439 ttm_mem_global_free(vmw_mem_glob(dev_priv), 710 ttm_mem_global_free(vmw_mem_glob(dev_priv),
440 vmw_user_context_size); 711 vmw_user_context_size);
@@ -465,8 +736,8 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
465 return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE); 736 return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
466} 737}
467 738
468int vmw_context_define_ioctl(struct drm_device *dev, void *data, 739static int vmw_context_define(struct drm_device *dev, void *data,
469 struct drm_file *file_priv) 740 struct drm_file *file_priv, bool dx)
470{ 741{
471 struct vmw_private *dev_priv = vmw_priv(dev); 742 struct vmw_private *dev_priv = vmw_priv(dev);
472 struct vmw_user_context *ctx; 743 struct vmw_user_context *ctx;
@@ -476,6 +747,10 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
476 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 747 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
477 int ret; 748 int ret;
478 749
750 if (!dev_priv->has_dx && dx) {
751 DRM_ERROR("DX contexts not supported by device.\n");
752 return -EINVAL;
753 }
479 754
480 /* 755 /*
481 * Approximate idr memory usage with 128 bytes. It will be limited 756 * Approximate idr memory usage with 128 bytes. It will be limited
@@ -516,7 +791,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
516 * From here on, the destructor takes over resource freeing. 791 * From here on, the destructor takes over resource freeing.
517 */ 792 */
518 793
519 ret = vmw_context_init(dev_priv, res, vmw_user_context_free); 794 ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
520 if (unlikely(ret != 0)) 795 if (unlikely(ret != 0))
521 goto out_unlock; 796 goto out_unlock;
522 797
@@ -535,387 +810,128 @@ out_err:
535out_unlock: 810out_unlock:
536 ttm_read_unlock(&dev_priv->reservation_sem); 811 ttm_read_unlock(&dev_priv->reservation_sem);
537 return ret; 812 return ret;
538
539}
540
541/**
542 * vmw_context_scrub_shader - scrub a shader binding from a context.
543 *
544 * @bi: single binding information.
545 * @rebind: Whether to issue a bind instead of scrub command.
546 */
547static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
548{
549 struct vmw_private *dev_priv = bi->ctx->dev_priv;
550 struct {
551 SVGA3dCmdHeader header;
552 SVGA3dCmdSetShader body;
553 } *cmd;
554
555 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
556 if (unlikely(cmd == NULL)) {
557 DRM_ERROR("Failed reserving FIFO space for shader "
558 "unbinding.\n");
559 return -ENOMEM;
560 }
561
562 cmd->header.id = SVGA_3D_CMD_SET_SHADER;
563 cmd->header.size = sizeof(cmd->body);
564 cmd->body.cid = bi->ctx->id;
565 cmd->body.type = bi->i1.shader_type;
566 cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
567 vmw_fifo_commit(dev_priv, sizeof(*cmd));
568
569 return 0;
570}
571
572/**
573 * vmw_context_scrub_render_target - scrub a render target binding
574 * from a context.
575 *
576 * @bi: single binding information.
577 * @rebind: Whether to issue a bind instead of scrub command.
578 */
579static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
580 bool rebind)
581{
582 struct vmw_private *dev_priv = bi->ctx->dev_priv;
583 struct {
584 SVGA3dCmdHeader header;
585 SVGA3dCmdSetRenderTarget body;
586 } *cmd;
587
588 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
589 if (unlikely(cmd == NULL)) {
590 DRM_ERROR("Failed reserving FIFO space for render target "
591 "unbinding.\n");
592 return -ENOMEM;
593 }
594
595 cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
596 cmd->header.size = sizeof(cmd->body);
597 cmd->body.cid = bi->ctx->id;
598 cmd->body.type = bi->i1.rt_type;
599 cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
600 cmd->body.target.face = 0;
601 cmd->body.target.mipmap = 0;
602 vmw_fifo_commit(dev_priv, sizeof(*cmd));
603
604 return 0;
605} 813}
606 814
607/** 815int vmw_context_define_ioctl(struct drm_device *dev, void *data,
608 * vmw_context_scrub_texture - scrub a texture binding from a context. 816 struct drm_file *file_priv)
609 *
610 * @bi: single binding information.
611 * @rebind: Whether to issue a bind instead of scrub command.
612 *
613 * TODO: Possibly complement this function with a function that takes
614 * a list of texture bindings and combines them to a single command.
615 */
616static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
617 bool rebind)
618{
619 struct vmw_private *dev_priv = bi->ctx->dev_priv;
620 struct {
621 SVGA3dCmdHeader header;
622 struct {
623 SVGA3dCmdSetTextureState c;
624 SVGA3dTextureState s1;
625 } body;
626 } *cmd;
627
628 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
629 if (unlikely(cmd == NULL)) {
630 DRM_ERROR("Failed reserving FIFO space for texture "
631 "unbinding.\n");
632 return -ENOMEM;
633 }
634
635
636 cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
637 cmd->header.size = sizeof(cmd->body);
638 cmd->body.c.cid = bi->ctx->id;
639 cmd->body.s1.stage = bi->i1.texture_stage;
640 cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
641 cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
642 vmw_fifo_commit(dev_priv, sizeof(*cmd));
643
644 return 0;
645}
646
647/**
648 * vmw_context_binding_drop: Stop tracking a context binding
649 *
650 * @cb: Pointer to binding tracker storage.
651 *
652 * Stops tracking a context binding, and re-initializes its storage.
653 * Typically used when the context binding is replaced with a binding to
654 * another (or the same, for that matter) resource.
655 */
656static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
657{ 817{
658 list_del(&cb->ctx_list); 818 return vmw_context_define(dev, data, file_priv, false);
659 if (!list_empty(&cb->res_list))
660 list_del(&cb->res_list);
661 cb->bi.ctx = NULL;
662} 819}
663 820
664/** 821int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
665 * vmw_context_binding_add: Start tracking a context binding 822 struct drm_file *file_priv)
666 *
667 * @cbs: Pointer to the context binding state tracker.
668 * @bi: Information about the binding to track.
669 *
670 * Performs basic checks on the binding to make sure arguments are within
671 * bounds and then starts tracking the binding in the context binding
672 * state structure @cbs.
673 */
674int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
675 const struct vmw_ctx_bindinfo *bi)
676{ 823{
677 struct vmw_ctx_binding *loc; 824 union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
678 825 struct drm_vmw_context_arg *rep = &arg->rep;
679 switch (bi->bt) { 826
680 case vmw_ctx_binding_rt: 827 switch (arg->req) {
681 if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) { 828 case drm_vmw_context_legacy:
682 DRM_ERROR("Illegal render target type %u.\n", 829 return vmw_context_define(dev, rep, file_priv, false);
683 (unsigned) bi->i1.rt_type); 830 case drm_vmw_context_dx:
684 return -EINVAL; 831 return vmw_context_define(dev, rep, file_priv, true);
685 }
686 loc = &cbs->render_targets[bi->i1.rt_type];
687 break;
688 case vmw_ctx_binding_tex:
689 if (unlikely((unsigned)bi->i1.texture_stage >=
690 SVGA3D_NUM_TEXTURE_UNITS)) {
691 DRM_ERROR("Illegal texture/sampler unit %u.\n",
692 (unsigned) bi->i1.texture_stage);
693 return -EINVAL;
694 }
695 loc = &cbs->texture_units[bi->i1.texture_stage];
696 break;
697 case vmw_ctx_binding_shader:
698 if (unlikely((unsigned)bi->i1.shader_type >=
699 SVGA3D_SHADERTYPE_MAX)) {
700 DRM_ERROR("Illegal shader type %u.\n",
701 (unsigned) bi->i1.shader_type);
702 return -EINVAL;
703 }
704 loc = &cbs->shaders[bi->i1.shader_type];
705 break;
706 default: 832 default:
707 BUG();
708 }
709
710 if (loc->bi.ctx != NULL)
711 vmw_context_binding_drop(loc);
712
713 loc->bi = *bi;
714 loc->bi.scrubbed = false;
715 list_add_tail(&loc->ctx_list, &cbs->list);
716 INIT_LIST_HEAD(&loc->res_list);
717
718 return 0;
719}
720
721/**
722 * vmw_context_binding_transfer: Transfer a context binding tracking entry.
723 *
724 * @cbs: Pointer to the persistent context binding state tracker.
725 * @bi: Information about the binding to track.
726 *
727 */
728static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
729 const struct vmw_ctx_bindinfo *bi)
730{
731 struct vmw_ctx_binding *loc;
732
733 switch (bi->bt) {
734 case vmw_ctx_binding_rt:
735 loc = &cbs->render_targets[bi->i1.rt_type];
736 break; 833 break;
737 case vmw_ctx_binding_tex:
738 loc = &cbs->texture_units[bi->i1.texture_stage];
739 break;
740 case vmw_ctx_binding_shader:
741 loc = &cbs->shaders[bi->i1.shader_type];
742 break;
743 default:
744 BUG();
745 }
746
747 if (loc->bi.ctx != NULL)
748 vmw_context_binding_drop(loc);
749
750 if (bi->res != NULL) {
751 loc->bi = *bi;
752 list_add_tail(&loc->ctx_list, &cbs->list);
753 list_add_tail(&loc->res_list, &bi->res->binding_head);
754 } 834 }
835 return -EINVAL;
755} 836}
756 837
757/** 838/**
758 * vmw_context_binding_kill - Kill a binding on the device 839 * vmw_context_binding_list - Return a list of context bindings
759 * and stop tracking it.
760 *
761 * @cb: Pointer to binding tracker storage.
762 *
763 * Emits FIFO commands to scrub a binding represented by @cb.
764 * Then stops tracking the binding and re-initializes its storage.
765 */
766static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
767{
768 if (!cb->bi.scrubbed) {
769 (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
770 cb->bi.scrubbed = true;
771 }
772 vmw_context_binding_drop(cb);
773}
774
775/**
776 * vmw_context_binding_state_kill - Kill all bindings associated with a
777 * struct vmw_ctx_binding state structure, and re-initialize the structure.
778 * 840 *
779 * @cbs: Pointer to the context binding state tracker. 841 * @ctx: The context resource
780 * 842 *
781 * Emits commands to scrub all bindings associated with the 843 * Returns the current list of bindings of the given context. Note that
782 * context binding state tracker. Then re-initializes the whole structure. 844 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
783 */ 845 */
784static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs) 846struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
785{ 847{
786 struct vmw_ctx_binding *entry, *next; 848 struct vmw_user_context *uctx =
849 container_of(ctx, struct vmw_user_context, res);
787 850
788 list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) 851 return vmw_binding_state_list(uctx->cbs);
789 vmw_context_binding_kill(entry);
790} 852}
791 853
792/** 854struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
793 * vmw_context_binding_state_scrub - Scrub all bindings associated with a
794 * struct vmw_ctx_binding state structure.
795 *
796 * @cbs: Pointer to the context binding state tracker.
797 *
798 * Emits commands to scrub all bindings associated with the
799 * context binding state tracker.
800 */
801static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
802{ 855{
803 struct vmw_ctx_binding *entry; 856 return container_of(ctx, struct vmw_user_context, res)->man;
804
805 list_for_each_entry(entry, &cbs->list, ctx_list) {
806 if (!entry->bi.scrubbed) {
807 (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
808 entry->bi.scrubbed = true;
809 }
810 }
811} 857}
812 858
813/** 859struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
814 * vmw_context_binding_res_list_kill - Kill all bindings on a 860 SVGACOTableType cotable_type)
815 * resource binding list
816 *
817 * @head: list head of resource binding list
818 *
819 * Kills all bindings associated with a specific resource. Typically
820 * called before the resource is destroyed.
821 */
822void vmw_context_binding_res_list_kill(struct list_head *head)
823{ 861{
824 struct vmw_ctx_binding *entry, *next; 862 if (cotable_type >= SVGA_COTABLE_DX10_MAX)
863 return ERR_PTR(-EINVAL);
825 864
826 list_for_each_entry_safe(entry, next, head, res_list) 865 return vmw_resource_reference
827 vmw_context_binding_kill(entry); 866 (container_of(ctx, struct vmw_user_context, res)->
867 cotables[cotable_type]);
828} 868}
829 869
830/** 870/**
831 * vmw_context_binding_res_list_scrub - Scrub all bindings on a 871 * vmw_context_binding_state -
832 * resource binding list 872 * Return a pointer to a context binding state structure
833 * 873 *
834 * @head: list head of resource binding list 874 * @ctx: The context resource
835 * 875 *
836 * Scrub all bindings associated with a specific resource. Typically 876 * Returns the current state of bindings of the given context. Note that
837 * called before the resource is evicted. 877 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
838 */ 878 */
839void vmw_context_binding_res_list_scrub(struct list_head *head) 879struct vmw_ctx_binding_state *
880vmw_context_binding_state(struct vmw_resource *ctx)
840{ 881{
841 struct vmw_ctx_binding *entry; 882 return container_of(ctx, struct vmw_user_context, res)->cbs;
842
843 list_for_each_entry(entry, head, res_list) {
844 if (!entry->bi.scrubbed) {
845 (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
846 entry->bi.scrubbed = true;
847 }
848 }
849} 883}
850 884
851/** 885/**
852 * vmw_context_binding_state_transfer - Commit staged binding info 886 * vmw_context_bind_dx_query -
887 * Sets query MOB for the context. If @mob is NULL, then this function will
888 * remove the association between the MOB and the context. This function
889 * assumes the binding_mutex is held.
853 * 890 *
854 * @ctx: Pointer to context to commit the staged binding info to. 891 * @ctx_res: The context resource
855 * @from: Staged binding info built during execbuf. 892 * @mob: a reference to the query MOB
856 * 893 *
857 * Transfers binding info from a temporary structure to the persistent 894 * Returns -EINVAL if a MOB has already been set and does not match the one
858 * structure in the context. This can be done once commands 895 * specified in the parameter. 0 otherwise.
859 */ 896 */
860void vmw_context_binding_state_transfer(struct vmw_resource *ctx, 897int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
861 struct vmw_ctx_binding_state *from) 898 struct vmw_dma_buffer *mob)
862{ 899{
863 struct vmw_user_context *uctx = 900 struct vmw_user_context *uctx =
864 container_of(ctx, struct vmw_user_context, res); 901 container_of(ctx_res, struct vmw_user_context, res);
865 struct vmw_ctx_binding *entry, *next;
866
867 list_for_each_entry_safe(entry, next, &from->list, ctx_list)
868 vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
869}
870 902
871/** 903 if (mob == NULL) {
872 * vmw_context_rebind_all - Rebind all scrubbed bindings of a context 904 if (uctx->dx_query_mob) {
873 * 905 uctx->dx_query_mob->dx_query_ctx = NULL;
874 * @ctx: The context resource 906 vmw_dmabuf_unreference(&uctx->dx_query_mob);
875 * 907 uctx->dx_query_mob = NULL;
876 * Walks through the context binding list and rebinds all scrubbed 908 }
877 * resources.
878 */
879int vmw_context_rebind_all(struct vmw_resource *ctx)
880{
881 struct vmw_ctx_binding *entry;
882 struct vmw_user_context *uctx =
883 container_of(ctx, struct vmw_user_context, res);
884 struct vmw_ctx_binding_state *cbs = &uctx->cbs;
885 int ret;
886 909
887 list_for_each_entry(entry, &cbs->list, ctx_list) { 910 return 0;
888 if (likely(!entry->bi.scrubbed)) 911 }
889 continue;
890 912
891 if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id == 913 /* Can only have one MOB per context for queries */
892 SVGA3D_INVALID_ID)) 914 if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
893 continue; 915 return -EINVAL;
894 916
895 ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true); 917 mob->dx_query_ctx = ctx_res;
896 if (unlikely(ret != 0))
897 return ret;
898 918
899 entry->bi.scrubbed = false; 919 if (!uctx->dx_query_mob)
900 } 920 uctx->dx_query_mob = vmw_dmabuf_reference(mob);
901 921
902 return 0; 922 return 0;
903} 923}
904 924
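
vmw_context_bind_dx_query() enforces a simple invariant: a context has at
most one query MOB, NULL drops the association, and binding a different MOB
while one is set fails. The rule in isolation, as a runnable sketch (the
struct and error value are illustrative):

    #include <stdio.h>

    struct ctx { void *query_mob; };

    static int bind_query(struct ctx *c, void *mob)
    {
            if (!mob) {                     /* NULL drops the association */
                    c->query_mob = NULL;
                    return 0;
            }
            if (c->query_mob && c->query_mob != mob)
                    return -22;             /* -EINVAL: already bound */
            c->query_mob = mob;             /* first (or same) MOB wins */
            return 0;
    }

    int main(void)
    {
            struct ctx c = { 0 };
            int a, b;

            bind_query(&c, &a);
            printf("%d\n", bind_query(&c, &b));     /* prints -22 */
            return 0;
    }
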
905/** 925/**
906 * vmw_context_binding_list - Return a list of context bindings 926 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
907 *
908 * @ctx: The context resource
909 * 927 *
910 * Returns the current list of bindings of the given context. Note that 928 * @ctx_res: The context resource
911 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
912 */ 929 */
913struct list_head *vmw_context_binding_list(struct vmw_resource *ctx) 930struct vmw_dma_buffer *
931vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
914{ 932{
915 return &(container_of(ctx, struct vmw_user_context, res)->cbs.list); 933 struct vmw_user_context *uctx =
916} 934 container_of(ctx_res, struct vmw_user_context, res);
917 935
918struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx) 936 return uctx->dx_query_mob;
919{
920 return container_of(ctx, struct vmw_user_context, res)->man;
921} 937}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
new file mode 100644
index 000000000000..ce659a125f2b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -0,0 +1,662 @@
1/**************************************************************************
2 *
3 * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Treat context OTables as resources to make use of the resource
 29 * backing MOB eviction mechanism, which is used to read back the COTable
30 * whenever the backing MOB is evicted.
31 */
32
33#include "vmwgfx_drv.h"
34#include "vmwgfx_resource_priv.h"
35#include <ttm/ttm_placement.h>
36#include "vmwgfx_so.h"
37
38/**
39 * struct vmw_cotable - Context Object Table resource
40 *
41 * @res: struct vmw_resource we are deriving from.
42 * @ctx: non-refcounted pointer to the owning context.
43 * @size_read_back: Size of data read back during eviction.
 44 * @seen_entries: Highest entry id seen in the command stream for this cotable.
45 * @type: The cotable type.
46 * @scrubbed: Whether the cotable has been scrubbed.
47 * @resource_list: List of resources in the cotable.
48 */
49struct vmw_cotable {
50 struct vmw_resource res;
51 struct vmw_resource *ctx;
52 size_t size_read_back;
53 int seen_entries;
54 u32 type;
55 bool scrubbed;
56 struct list_head resource_list;
57};
58
59/**
60 * struct vmw_cotable_info - Static info about cotable types
61 *
 62 * @min_initial_entries: Min number of initial entries at cotable allocation
63 * for this cotable type.
64 * @size: Size of each entry.
65 */
66struct vmw_cotable_info {
67 u32 min_initial_entries;
68 u32 size;
69 void (*unbind_func)(struct vmw_private *, struct list_head *,
70 bool);
71};
72
73static const struct vmw_cotable_info co_info[] = {
74 {1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
75 {1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
76 {1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
77 {1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
78 {1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
79 {1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
80 {1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
81 {1, sizeof(SVGACOTableDXSamplerEntry), NULL},
82 {1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
83 {1, sizeof(SVGACOTableDXQueryEntry), NULL},
84 {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
85};
86
87/*
88 * Cotables with bindings that we remove must be scrubbed first,
89 * otherwise, the device will swap in an invalid context when we remove
90 * bindings before scrubbing a cotable...
91 */
92const SVGACOTableType vmw_cotable_scrub_order[] = {
93 SVGA_COTABLE_RTVIEW,
94 SVGA_COTABLE_DSVIEW,
95 SVGA_COTABLE_SRVIEW,
96 SVGA_COTABLE_DXSHADER,
97 SVGA_COTABLE_ELEMENTLAYOUT,
98 SVGA_COTABLE_BLENDSTATE,
99 SVGA_COTABLE_DEPTHSTENCIL,
100 SVGA_COTABLE_RASTERIZERSTATE,
101 SVGA_COTABLE_SAMPLER,
102 SVGA_COTABLE_STREAMOUTPUT,
103 SVGA_COTABLE_DXQUERY,
104};
105
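
Given the comment above, vmw_cotable_scrub_order[] must visit every cotable
type exactly once, with the view and shader tables ahead of the state tables
they reference. A quick standalone check of the permutation property,
assuming 11 types as in co_info[] (the numeric values below are stand-ins,
since the SVGA_COTABLE_* constants are defined elsewhere):

    #include <stdio.h>

    #define COTABLE_MAX 11          /* assumed to match co_info[] above */

    static const int scrub_order[COTABLE_MAX] = {
            7, 8, 6, 10, 0, 1, 2, 3, 4, 9, 5    /* stand-in values */
    };

    int main(void)
    {
            int seen[COTABLE_MAX] = { 0 };
            int i;

            for (i = 0; i < COTABLE_MAX; ++i)
                    seen[scrub_order[i]]++;
            for (i = 0; i < COTABLE_MAX; ++i)
                    if (seen[i] != 1) {
                            printf("type %d scrubbed %d times\n", i, seen[i]);
                            return 1;
                    }
            printf("scrub order covers each type exactly once\n");
            return 0;
    }
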
106static int vmw_cotable_bind(struct vmw_resource *res,
107 struct ttm_validate_buffer *val_buf);
108static int vmw_cotable_unbind(struct vmw_resource *res,
109 bool readback,
110 struct ttm_validate_buffer *val_buf);
111static int vmw_cotable_create(struct vmw_resource *res);
112static int vmw_cotable_destroy(struct vmw_resource *res);
113
114static const struct vmw_res_func vmw_cotable_func = {
115 .res_type = vmw_res_cotable,
116 .needs_backup = true,
117 .may_evict = true,
118 .type_name = "context guest backed object tables",
119 .backup_placement = &vmw_mob_placement,
120 .create = vmw_cotable_create,
121 .destroy = vmw_cotable_destroy,
122 .bind = vmw_cotable_bind,
123 .unbind = vmw_cotable_unbind,
124};
125
126/**
127 * vmw_cotable - Convert a struct vmw_resource pointer to a struct
128 * vmw_cotable pointer
129 *
130 * @res: Pointer to the resource.
131 */
132static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
133{
134 return container_of(res, struct vmw_cotable, res);
135}
136
137/**
138 * vmw_cotable_destroy - Cotable resource destroy callback
139 *
140 * @res: Pointer to the cotable resource.
141 *
142 * There is no device cotable destroy command, so this function only
143 * makes sure that the resource id is set to invalid.
144 */
145static int vmw_cotable_destroy(struct vmw_resource *res)
146{
147 res->id = -1;
148 return 0;
149}
150
151/**
 152 * vmw_cotable_unscrub - Undo a cotable scrub operation
153 *
154 * @res: Pointer to the cotable resource
155 *
156 * This function issues commands to (re)bind the cotable to
157 * its backing mob, which needs to be validated and reserved at this point.
 158 * This is identical to bind(), except that the function interface differs.
159 */
160static int vmw_cotable_unscrub(struct vmw_resource *res)
161{
162 struct vmw_cotable *vcotbl = vmw_cotable(res);
163 struct vmw_private *dev_priv = res->dev_priv;
164 struct ttm_buffer_object *bo = &res->backup->base;
165 struct {
166 SVGA3dCmdHeader header;
167 SVGA3dCmdDXSetCOTable body;
168 } *cmd;
169
170 WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
171 lockdep_assert_held(&bo->resv->lock.base);
172
173 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
174 if (!cmd) {
175 DRM_ERROR("Failed reserving FIFO space for cotable "
176 "binding.\n");
177 return -ENOMEM;
178 }
179
180 WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
181 WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
182 cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
183 cmd->header.size = sizeof(cmd->body);
184 cmd->body.cid = vcotbl->ctx->id;
185 cmd->body.type = vcotbl->type;
186 cmd->body.mobid = bo->mem.start;
187 cmd->body.validSizeInBytes = vcotbl->size_read_back;
188
189 vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
190 vcotbl->scrubbed = false;
191
192 return 0;
193}
194
195/**
 196 * vmw_cotable_bind - Cotable resource bind callback
197 *
198 * @res: Pointer to the cotable resource
199 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
200 * for convenience / fencing.
201 *
202 * This function issues commands to (re)bind the cotable to
203 * its backing mob, which needs to be validated and reserved at this point.
204 */
205static int vmw_cotable_bind(struct vmw_resource *res,
206 struct ttm_validate_buffer *val_buf)
207{
208 /*
209 * The create() callback may have changed @res->backup without
210 * the caller noticing, and with val_buf->bo still pointing to
211 * the old backup buffer. Although hackish, and not used currently,
212 * take the opportunity to correct the value here so that it's not
213 * misused in the future.
214 */
215 val_buf->bo = &res->backup->base;
216
217 return vmw_cotable_unscrub(res);
218}
219
220/**
221 * vmw_cotable_scrub - Scrub the cotable from the device.
222 *
223 * @res: Pointer to the cotable resource.
 224 * @readback: Whether to initiate a readback of the cotable data to the backup
225 * buffer.
226 *
227 * In some situations (context swapouts) it might be desirable to make the
228 * device forget about the cotable without performing a full unbind. A full
229 * unbind requires reserved backup buffers and it might not be possible to
230 * reserve them due to locking order violation issues. The vmw_cotable_scrub
231 * function implements a partial unbind() without that requirement but with the
232 * following restrictions.
233 * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
234 * be called.
235 * 2) Before the cotable backing buffer is used by the CPU, or during the
236 * resource destruction, vmw_cotable_unbind() must be called.
237 */
238int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
239{
240 struct vmw_cotable *vcotbl = vmw_cotable(res);
241 struct vmw_private *dev_priv = res->dev_priv;
242 size_t submit_size;
243
244 struct {
245 SVGA3dCmdHeader header;
246 SVGA3dCmdDXReadbackCOTable body;
247 } *cmd0;
248 struct {
249 SVGA3dCmdHeader header;
250 SVGA3dCmdDXSetCOTable body;
251 } *cmd1;
252
253 if (vcotbl->scrubbed)
254 return 0;
255
256 if (co_info[vcotbl->type].unbind_func)
257 co_info[vcotbl->type].unbind_func(dev_priv,
258 &vcotbl->resource_list,
259 readback);
260 submit_size = sizeof(*cmd1);
261 if (readback)
262 submit_size += sizeof(*cmd0);
263
264 cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
265 if (!cmd1) {
266 DRM_ERROR("Failed reserving FIFO space for cotable "
267 "unbinding.\n");
268 return -ENOMEM;
269 }
270
271 vcotbl->size_read_back = 0;
272 if (readback) {
273 cmd0 = (void *) cmd1;
274 cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
275 cmd0->header.size = sizeof(cmd0->body);
276 cmd0->body.cid = vcotbl->ctx->id;
277 cmd0->body.type = vcotbl->type;
278 cmd1 = (void *) &cmd0[1];
279 vcotbl->size_read_back = res->backup_size;
280 }
281 cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
282 cmd1->header.size = sizeof(cmd1->body);
283 cmd1->body.cid = vcotbl->ctx->id;
284 cmd1->body.type = vcotbl->type;
285 cmd1->body.mobid = SVGA3D_INVALID_ID;
286 cmd1->body.validSizeInBytes = 0;
287 vmw_fifo_commit_flush(dev_priv, submit_size);
288 vcotbl->scrubbed = true;
289
290 /* Trigger a create() on next validate. */
291 res->id = -1;
292
293 return 0;
294}
295
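
The two restrictions in the vmw_cotable_scrub() comment describe a small
lifecycle: a scrubbed table must be unscrubbed before the GPU touches it
again, and must be fully unbound before the CPU touches its backing buffer. A
toy model of those rules with the invariants as assertions (the states and
helpers are illustrative):

    #include <assert.h>
    #include <stdbool.h>

    struct cotable { bool scrubbed; bool bound; };

    static void scrub(struct cotable *t)   { t->scrubbed = true; }
    static void unscrub(struct cotable *t) { t->scrubbed = false; }

    static void gpu_use(struct cotable *t)
    {
            assert(!t->scrubbed);   /* rule 1: unscrub before GPU reuse */
    }

    static void unbind(struct cotable *t)
    {
            if (!t->scrubbed)
                    scrub(t);       /* a full unbind includes the scrub */
            t->bound = false;
    }

    static void cpu_use(struct cotable *t)
    {
            assert(!t->bound);      /* rule 2: unbind before CPU access */
    }

    int main(void)
    {
            struct cotable t = { false, true };

            scrub(&t);              /* e.g. a context swapout */
            unscrub(&t);
            gpu_use(&t);
            scrub(&t);
            unbind(&t);
            cpu_use(&t);
            return 0;
    }
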
296/**
297 * vmw_cotable_unbind - Cotable resource unbind callback
298 *
299 * @res: Pointer to the cotable resource.
300 * @readback: Whether to read back cotable data to the backup buffer.
 301 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
302 * for convenience / fencing.
303 *
304 * Unbinds the cotable from the device and fences the backup buffer.
305 */
306static int vmw_cotable_unbind(struct vmw_resource *res,
307 bool readback,
308 struct ttm_validate_buffer *val_buf)
309{
310 struct vmw_cotable *vcotbl = vmw_cotable(res);
311 struct vmw_private *dev_priv = res->dev_priv;
312 struct ttm_buffer_object *bo = val_buf->bo;
313 struct vmw_fence_obj *fence;
314 int ret;
315
316 if (list_empty(&res->mob_head))
317 return 0;
318
319 WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
320 lockdep_assert_held(&bo->resv->lock.base);
321
322 mutex_lock(&dev_priv->binding_mutex);
323 if (!vcotbl->scrubbed)
324 vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
325 mutex_unlock(&dev_priv->binding_mutex);
 326	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
327 vmw_fence_single_bo(bo, fence);
328 if (likely(fence != NULL))
329 vmw_fence_obj_unreference(&fence);
330
331 return ret;
332}
333
334/**
335 * vmw_cotable_readback - Read back a cotable without unbinding.
336 *
337 * @res: The cotable resource.
338 *
339 * Reads back a cotable to its backing mob without scrubbing the MOB from
340 * the cotable. The MOB is fenced for subsequent CPU access.
341 */
342static int vmw_cotable_readback(struct vmw_resource *res)
343{
344 struct vmw_cotable *vcotbl = vmw_cotable(res);
345 struct vmw_private *dev_priv = res->dev_priv;
346
347 struct {
348 SVGA3dCmdHeader header;
349 SVGA3dCmdDXReadbackCOTable body;
350 } *cmd;
351 struct vmw_fence_obj *fence;
352
353 if (!vcotbl->scrubbed) {
354 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
355 SVGA3D_INVALID_ID);
356 if (!cmd) {
357 DRM_ERROR("Failed reserving FIFO space for cotable "
358 "readback.\n");
359 return -ENOMEM;
360 }
361 cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
362 cmd->header.size = sizeof(cmd->body);
363 cmd->body.cid = vcotbl->ctx->id;
364 cmd->body.type = vcotbl->type;
365 vcotbl->size_read_back = res->backup_size;
366 vmw_fifo_commit(dev_priv, sizeof(*cmd));
367 }
368
369 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
370 vmw_fence_single_bo(&res->backup->base, fence);
371 vmw_fence_obj_unreference(&fence);
372
373 return 0;
374}
375
376/**
377 * vmw_cotable_resize - Resize a cotable.
378 *
379 * @res: The cotable resource.
380 * @new_size: The new size.
381 *
382 * Resizes a cotable and binds the new backup buffer.
383 * On failure the cotable is left intact.
384 * Important! This function may not fail once the MOB switch has been
385 * committed to hardware. That would put the device context in an
386 * invalid state which we can't currently recover from.
387 */
388static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
389{
390 struct vmw_private *dev_priv = res->dev_priv;
391 struct vmw_cotable *vcotbl = vmw_cotable(res);
392 struct vmw_dma_buffer *buf, *old_buf = res->backup;
393 struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
394 size_t old_size = res->backup_size;
395 size_t old_size_read_back = vcotbl->size_read_back;
396 size_t cur_size_read_back;
397 struct ttm_bo_kmap_obj old_map, new_map;
398 int ret;
399 size_t i;
400
401 ret = vmw_cotable_readback(res);
402 if (ret)
403 return ret;
404
405 cur_size_read_back = vcotbl->size_read_back;
406 vcotbl->size_read_back = old_size_read_back;
407
408 /*
 409	 * While the device is processing, allocate and reserve a buffer object
410 * for the new COTable. Initially pin the buffer object to make sure
411 * we can use tryreserve without failure.
412 */
413 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
414 if (!buf)
415 return -ENOMEM;
416
417 ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
418 true, vmw_dmabuf_bo_free);
419 if (ret) {
420 DRM_ERROR("Failed initializing new cotable MOB.\n");
421 return ret;
422 }
423
424 bo = &buf->base;
425 WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL));
426
427 ret = ttm_bo_wait(old_bo, false, false, false);
428 if (unlikely(ret != 0)) {
429 DRM_ERROR("Failed waiting for cotable unbind.\n");
430 goto out_wait;
431 }
432
433 /*
434 * Do a page by page copy of COTables. This eliminates slow vmap()s.
435 * This should really be a TTM utility.
436 */
437 for (i = 0; i < old_bo->num_pages; ++i) {
438 bool dummy;
439
440 ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
441 if (unlikely(ret != 0)) {
442 DRM_ERROR("Failed mapping old COTable on resize.\n");
443 goto out_wait;
444 }
445 ret = ttm_bo_kmap(bo, i, 1, &new_map);
446 if (unlikely(ret != 0)) {
447 DRM_ERROR("Failed mapping new COTable on resize.\n");
448 goto out_map_new;
449 }
450 memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
451 ttm_kmap_obj_virtual(&old_map, &dummy),
452 PAGE_SIZE);
453 ttm_bo_kunmap(&new_map);
454 ttm_bo_kunmap(&old_map);
455 }
456
457 /* Unpin new buffer, and switch backup buffers. */
458 ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
459 if (unlikely(ret != 0)) {
460 DRM_ERROR("Failed validating new COTable backup buffer.\n");
461 goto out_wait;
462 }
463
464 res->backup = buf;
465 res->backup_size = new_size;
466 vcotbl->size_read_back = cur_size_read_back;
467
468 /*
469 * Now tell the device to switch. If this fails, then we need to
470 * revert the full resize.
471 */
472 ret = vmw_cotable_unscrub(res);
473 if (ret) {
474 DRM_ERROR("Failed switching COTable backup buffer.\n");
475 res->backup = old_buf;
476 res->backup_size = old_size;
477 vcotbl->size_read_back = old_size_read_back;
478 goto out_wait;
479 }
480
481 /* Let go of the old mob. */
482 list_del(&res->mob_head);
483 list_add_tail(&res->mob_head, &buf->res_list);
484 vmw_dmabuf_unreference(&old_buf);
485 res->id = vcotbl->type;
486
487 return 0;
488
489out_map_new:
490 ttm_bo_kunmap(&old_map);
491out_wait:
492 ttm_bo_unreserve(bo);
493 vmw_dmabuf_unreference(&buf);
494
495 return ret;
496}
497
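
The resize above is careful about ordering: the contents are copied into the
new buffer first, res->backup is switched tentatively, and only then is the
device asked to adopt the new MOB; if that last step fails, the swap is
reverted while the old buffer is still intact. A simplified userspace model
of that commit-or-revert flow (buffer handling and error codes are
stand-ins):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct res { char *backup; size_t size; };

    static int device_switch_ok;    /* stands in for vmw_cotable_unscrub() */

    static int resize(struct res *r, size_t new_size)
    {
            char *old = r->backup;
            size_t old_size = r->size;
            char *buf = calloc(1, new_size);

            if (!buf)
                    return -12;             /* -ENOMEM */
            memcpy(buf, old, old_size);     /* page-by-page in the driver */

            r->backup = buf;                /* tentatively commit the swap */
            r->size = new_size;
            if (!device_switch_ok) {        /* device refused: revert all */
                    r->backup = old;
                    r->size = old_size;
                    free(buf);
                    return -5;
            }
            free(old);                      /* switched: drop the old MOB */
            return 0;
    }

    int main(void)
    {
            struct res r = { calloc(1, 8), 8 };

            device_switch_ok = 1;
            printf("ret=%d size=%zu\n", resize(&r, 16), r.size);
            free(r.backup);
            return 0;
    }
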
498/**
499 * vmw_cotable_create - Cotable resource create callback
500 *
501 * @res: Pointer to a cotable resource.
502 *
503 * There is no separate create command for cotables, so this callback, which
 504 * is called before bind() in the validation sequence, is instead used for two
505 * things.
506 * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
507 * buffer, that is, if @res->mob_head is non-empty.
508 * 2) Resize the cotable if needed.
509 */
510static int vmw_cotable_create(struct vmw_resource *res)
511{
512 struct vmw_cotable *vcotbl = vmw_cotable(res);
513 size_t new_size = res->backup_size;
514 size_t needed_size;
515 int ret;
516
517 /* Check whether we need to resize the cotable */
518 needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
519 while (needed_size > new_size)
520 new_size *= 2;
521
522 if (likely(new_size <= res->backup_size)) {
523 if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
524 ret = vmw_cotable_unscrub(res);
525 if (ret)
526 return ret;
527 }
528 res->id = vcotbl->type;
529 return 0;
530 }
531
532 return vmw_cotable_resize(res, new_size);
533}
534
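
The sizing step in vmw_cotable_create() grows the backup geometrically, so a
stream of vmw_cotable_notify() calls triggers only O(log n) resizes. That
computation in isolation, runnable as-is:

    #include <stdio.h>

    static size_t cotable_grow(size_t cur_size, size_t entry_size,
                               int seen_entries)
    {
            size_t needed = ((size_t)seen_entries + 1) * entry_size;
            size_t new_size = cur_size;

            while (needed > new_size)
                    new_size *= 2;          /* double until it fits */
            return new_size;
    }

    int main(void)
    {
            /* 4096-byte table, 64-byte entries, entry id 100 seen */
            printf("%zu\n", cotable_grow(4096, 64, 100));   /* 8192 */
            return 0;
    }
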
535/**
536 * vmw_hw_cotable_destroy - Cotable hw_destroy callback
537 *
538 * @res: Pointer to a cotable resource.
539 *
540 * The final (part of resource destruction) destroy callback.
541 */
542static void vmw_hw_cotable_destroy(struct vmw_resource *res)
543{
544 (void) vmw_cotable_destroy(res);
545}
546
547static size_t cotable_acc_size;
548
549/**
550 * vmw_cotable_free - Cotable resource destructor
551 *
552 * @res: Pointer to a cotable resource.
553 */
554static void vmw_cotable_free(struct vmw_resource *res)
555{
556 struct vmw_private *dev_priv = res->dev_priv;
557
558 kfree(res);
559 ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
560}
561
562/**
563 * vmw_cotable_alloc - Create a cotable resource
564 *
565 * @dev_priv: Pointer to a device private struct.
566 * @ctx: Pointer to the context resource.
567 * The cotable resource will not add a refcount.
568 * @type: The cotable type.
569 */
570struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
571 struct vmw_resource *ctx,
572 u32 type)
573{
574 struct vmw_cotable *vcotbl;
575 int ret;
576 u32 num_entries;
577
578 if (unlikely(cotable_acc_size == 0))
579 cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
580
581 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
582 cotable_acc_size, false, true);
583 if (unlikely(ret))
584 return ERR_PTR(ret);
585
586 vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
587 if (unlikely(vcotbl == NULL)) {
588 ret = -ENOMEM;
589 goto out_no_alloc;
590 }
591
592 ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
593 vmw_cotable_free, &vmw_cotable_func);
594 if (unlikely(ret != 0))
595 goto out_no_init;
596
597 INIT_LIST_HEAD(&vcotbl->resource_list);
598 vcotbl->res.id = type;
599 vcotbl->res.backup_size = PAGE_SIZE;
600 num_entries = PAGE_SIZE / co_info[type].size;
601 if (num_entries < co_info[type].min_initial_entries) {
602 vcotbl->res.backup_size = co_info[type].min_initial_entries *
603 co_info[type].size;
604 vcotbl->res.backup_size =
605 (vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
606 }
607
608 vcotbl->scrubbed = true;
609 vcotbl->seen_entries = -1;
610 vcotbl->type = type;
611 vcotbl->ctx = ctx;
612
613 vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);
614
615 return &vcotbl->res;
616
617out_no_init:
618 kfree(vcotbl);
619out_no_alloc:
620 ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
621 return ERR_PTR(ret);
622}
623
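
The initial backup sizing in vmw_cotable_alloc() uses the classic
round-up-to-page trick, with PAGE_MASK being ~(PAGE_SIZE - 1) in the kernel.
A worked example:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))    /* as defined by the kernel */

    static unsigned long page_align(unsigned long bytes)
    {
            return (bytes + PAGE_SIZE - 1) & PAGE_MASK;
    }

    int main(void)
    {
            printf("%lu %lu %lu\n",
                   page_align(1), page_align(4096), page_align(4097));
            /* prints: 4096 4096 8192 */
            return 0;
    }
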
624/**
625 * vmw_cotable_notify - Notify the cotable about an item creation
626 *
627 * @res: Pointer to a cotable resource.
628 * @id: Item id.
629 */
630int vmw_cotable_notify(struct vmw_resource *res, int id)
631{
632 struct vmw_cotable *vcotbl = vmw_cotable(res);
633
634 if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
635 DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
636 (unsigned) vcotbl->type, id);
637 return -EINVAL;
638 }
639
640 if (vcotbl->seen_entries < id) {
641 /* Trigger a call to create() on next validate */
642 res->id = -1;
643 vcotbl->seen_entries = id;
644 }
645
646 return 0;
647}
648
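
vmw_cotable_notify() is pure bookkeeping: it tracks the highest entry id seen
and poisons the resource id so the next validation re-runs create(), which
resizes if needed. The same high-water-mark logic as a runnable sketch:

    #include <stdio.h>

    struct cotbl { int res_id; int seen_entries; };

    static int notify(struct cotbl *t, int id, int max_ids)
    {
            if (id < 0 || id >= max_ids)
                    return -22;             /* -EINVAL */
            if (t->seen_entries < id) {
                    t->res_id = -1;         /* force create() on validate */
                    t->seen_entries = id;
            }
            return 0;
    }

    int main(void)
    {
            struct cotbl t = { 3, -1 };

            notify(&t, 10, 64);
            printf("res_id=%d seen=%d\n", t.res_id, t.seen_entries);
            return 0;
    }
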
649/**
 650 * vmw_cotable_add_resource - Add a resource to the cotable's list of active resources.
651 *
 652 * @res: Pointer to a struct vmw_resource representing the cotable.
653 * @head: pointer to the struct list_head member of the resource, dedicated
654 * to the cotable active resource list.
655 */
656void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
657{
658 struct vmw_cotable *vcotbl =
659 container_of(res, struct vmw_cotable, res);
660
661 list_add_tail(head, &vcotbl->resource_list);
662}
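vmw_cotable_add_resource() recovers the enclosing vmw_cotable from the embedded res member via container_of(). A self-contained sketch of that embedded-struct pattern, using toy types and a local container_of definition:

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy versions of the embedded-struct layout used above. */
struct resource { int id; };
struct cotable {
        int type;
        struct resource res; /* embedded, not a pointer */
};

int main(void)
{
        struct cotable tbl = { .type = 3, .res = { .id = 42 } };
        struct resource *res = &tbl.res;

        /* Recover the enclosing cotable from the embedded member. */
        struct cotable *back = container_of(res, struct cotable, res);
        printf("type=%d id=%d\n", back->type, back->res.id);
        return 0;
}
```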
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 914b375763dc..299925a1f6c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,25 +32,20 @@
32 32
33 33
34/** 34/**
35 * vmw_dmabuf_to_placement - Validate a buffer to placement. 35 * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
36 * 36 *
37 * @dev_priv: Driver private. 37 * @dev_priv: Driver private.
38 * @buf: DMA buffer to move. 38 * @buf: DMA buffer to move.
39 * @pin: Pin buffer if true. 39 * @placement: The placement to pin it.
40 * @interruptible: Use interruptible wait. 40 * @interruptible: Use interruptible wait.
41 * 41 *
42 * May only be called by the current master since it assumes that the
43 * master lock is the current master's lock.
44 * This function takes the master's lock in write mode.
45 * Flushes and unpins the query bo to avoid failures.
46 *
47 * Returns 42 * Returns
48 * -ERESTARTSYS if interrupted by a signal. 43 * -ERESTARTSYS if interrupted by a signal.
49 */ 44 */
50int vmw_dmabuf_to_placement(struct vmw_private *dev_priv, 45int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
51 struct vmw_dma_buffer *buf, 46 struct vmw_dma_buffer *buf,
52 struct ttm_placement *placement, 47 struct ttm_placement *placement,
53 bool interruptible) 48 bool interruptible)
54{ 49{
55 struct ttm_buffer_object *bo = &buf->base; 50 struct ttm_buffer_object *bo = &buf->base;
56 int ret; 51 int ret;
@@ -66,6 +61,8 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
66 goto err; 61 goto err;
67 62
68 ret = ttm_bo_validate(bo, placement, interruptible, false); 63 ret = ttm_bo_validate(bo, placement, interruptible, false);
64 if (!ret)
65 vmw_bo_pin_reserved(buf, true);
69 66
70 ttm_bo_unreserve(bo); 67 ttm_bo_unreserve(bo);
71 68
@@ -75,12 +72,10 @@ err:
75} 72}
76 73
77/** 74/**
78 * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr. 75 * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
79 * 76 *
80 * May only be called by the current master since it assumes that the 77 * This function takes the reservation_sem in write mode.
81 * master lock is the current master's lock. 78 * Flushes and unpins the query bo to avoid failures.
82 * This function takes the master's lock in write mode.
83 * Flushes and unpins the query bo if @pin == true to avoid failures.
84 * 79 *
85 * @dev_priv: Driver private. 80 * @dev_priv: Driver private.
86 * @buf: DMA buffer to move. 81 * @buf: DMA buffer to move.
@@ -90,55 +85,34 @@ err:
90 * Returns 85 * Returns
91 * -ERESTARTSYS if interrupted by a signal. 86 * -ERESTARTSYS if interrupted by a signal.
92 */ 87 */
93int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, 88int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
94 struct vmw_dma_buffer *buf, 89 struct vmw_dma_buffer *buf,
95 bool pin, bool interruptible) 90 bool interruptible)
96{ 91{
97 struct ttm_buffer_object *bo = &buf->base; 92 struct ttm_buffer_object *bo = &buf->base;
98 struct ttm_placement *placement;
99 int ret; 93 int ret;
100 94
101 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); 95 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
102 if (unlikely(ret != 0)) 96 if (unlikely(ret != 0))
103 return ret; 97 return ret;
104 98
105 if (pin) 99 vmw_execbuf_release_pinned_bo(dev_priv);
106 vmw_execbuf_release_pinned_bo(dev_priv);
107 100
108 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); 101 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
109 if (unlikely(ret != 0)) 102 if (unlikely(ret != 0))
110 goto err; 103 goto err;
111 104
112 /** 105 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
113 * Put BO in VRAM if there is space, otherwise as a GMR. 106 false);
114 * If there is no space in VRAM and GMR ids are all used up,
115 * start evicting GMRs to make room. If the DMA buffer can't be
116 * used as a GMR, this will return -ENOMEM.
117 */
118
119 if (pin)
120 placement = &vmw_vram_gmr_ne_placement;
121 else
122 placement = &vmw_vram_gmr_placement;
123
124 ret = ttm_bo_validate(bo, placement, interruptible, false);
125 if (likely(ret == 0) || ret == -ERESTARTSYS) 107 if (likely(ret == 0) || ret == -ERESTARTSYS)
126 goto err_unreserve; 108 goto out_unreserve;
127
128 109
129 /** 110 ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
130 * If that failed, try VRAM again, this time evicting
131 * previous contents.
132 */
133
134 if (pin)
135 placement = &vmw_vram_ne_placement;
136 else
137 placement = &vmw_vram_placement;
138 111
139 ret = ttm_bo_validate(bo, placement, interruptible, false); 112out_unreserve:
113 if (!ret)
114 vmw_bo_pin_reserved(buf, true);
140 115
141err_unreserve:
142 ttm_bo_unreserve(bo); 116 ttm_bo_unreserve(bo);
143err: 117err:
144 ttm_write_unlock(&dev_priv->reservation_sem); 118 ttm_write_unlock(&dev_priv->reservation_sem);
@@ -146,67 +120,50 @@ err:
146} 120}
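The new pin_in_vram_or_gmr flow keeps the old two-stage strategy: validate against the VRAM-or-GMR placement first, and only on failure other than -ERESTARTSYS retry plain VRAM, which may evict. A compact model of that fallback order, with a stubbed validate() standing in for ttm_bo_validate():

```c
#include <stdio.h>

enum place { VRAM_OR_GMR, VRAM_ONLY };

/* Stub: only "succeeds" for VRAM_ONLY, to exercise the fallback. */
static int validate(enum place p)
{
        return (p == VRAM_ONLY) ? 0 : -12; /* -ENOMEM-style failure */
}

/* Mirrors the flow above: prefer VRAM-or-GMR placement, and only if
 * that fails retry plain VRAM (which may evict other buffers). */
static int pin_with_fallback(void)
{
        int ret = validate(VRAM_OR_GMR);

        if (ret == 0 || ret == -512 /* -ERESTARTSYS: don't retry */)
                return ret;
        return validate(VRAM_ONLY);
}

int main(void)
{
        printf("ret=%d\n", pin_with_fallback()); /* ret=0 via the fallback */
        return 0;
}
```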
147 121
148/** 122/**
149 * vmw_dmabuf_to_vram - Move a buffer to vram. 123 * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
150 * 124 *
151 * May only be called by the current master since it assumes that the 125 * This function takes the reservation_sem in write mode.
152 * master lock is the current master's lock. 126 * Flushes and unpins the query bo to avoid failures.
153 * This function takes the master's lock in write mode.
154 * 127 *
155 * @dev_priv: Driver private. 128 * @dev_priv: Driver private.
156 * @buf: DMA buffer to move. 129 * @buf: DMA buffer to move.
157 * @pin: Pin buffer in vram if true.
158 * @interruptible: Use interruptible wait. 130 * @interruptible: Use interruptible wait.
159 * 131 *
160 * Returns 132 * Returns
161 * -ERESTARTSYS if interrupted by a signal. 133 * -ERESTARTSYS if interrupted by a signal.
162 */ 134 */
163int vmw_dmabuf_to_vram(struct vmw_private *dev_priv, 135int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
164 struct vmw_dma_buffer *buf, 136 struct vmw_dma_buffer *buf,
165 bool pin, bool interruptible) 137 bool interruptible)
166{ 138{
167 struct ttm_placement *placement; 139 return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
168 140 interruptible);
169 if (pin)
170 placement = &vmw_vram_ne_placement;
171 else
172 placement = &vmw_vram_placement;
173
174 return vmw_dmabuf_to_placement(dev_priv, buf,
175 placement,
176 interruptible);
177} 141}
178 142
179/** 143/**
180 * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram. 144 * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
181 * 145 *
182 * May only be called by the current master since it assumes that the 146 * This function takes the reservation_sem in write mode.
183 * master lock is the current master's lock. 147 * Flushes and unpins the query bo to avoid failures.
184 * This function takes the master's lock in write mode.
185 * Flushes and unpins the query bo if @pin == true to avoid failures.
186 * 148 *
187 * @dev_priv: Driver private. 149 * @dev_priv: Driver private.
188 * @buf: DMA buffer to move. 150 * @buf: DMA buffer to pin.
189 * @pin: Pin buffer in vram if true.
190 * @interruptible: Use interruptible wait. 151 * @interruptible: Use interruptible wait.
191 * 152 *
192 * Returns 153 * Returns
193 * -ERESTARTSYS if interrupted by a signal. 154 * -ERESTARTSYS if interrupted by a signal.
194 */ 155 */
195int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, 156int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
196 struct vmw_dma_buffer *buf, 157 struct vmw_dma_buffer *buf,
197 bool pin, bool interruptible) 158 bool interruptible)
198{ 159{
199 struct ttm_buffer_object *bo = &buf->base; 160 struct ttm_buffer_object *bo = &buf->base;
200 struct ttm_placement placement; 161 struct ttm_placement placement;
201 struct ttm_place place; 162 struct ttm_place place;
202 int ret = 0; 163 int ret = 0;
203 164
204 if (pin) 165 place = vmw_vram_placement.placement[0];
205 place = vmw_vram_ne_placement.placement[0];
206 else
207 place = vmw_vram_placement.placement[0];
208 place.lpfn = bo->num_pages; 166 place.lpfn = bo->num_pages;
209
210 placement.num_placement = 1; 167 placement.num_placement = 1;
211 placement.placement = &place; 168 placement.placement = &place;
212 placement.num_busy_placement = 1; 169 placement.num_busy_placement = 1;
@@ -216,13 +173,16 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
216 if (unlikely(ret != 0)) 173 if (unlikely(ret != 0))
217 return ret; 174 return ret;
218 175
219 if (pin) 176 vmw_execbuf_release_pinned_bo(dev_priv);
220 vmw_execbuf_release_pinned_bo(dev_priv);
221 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); 177 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
222 if (unlikely(ret != 0)) 178 if (unlikely(ret != 0))
223 goto err_unlock; 179 goto err_unlock;
224 180
225 /* Is this buffer already in vram but not at the start of it? */ 181 /*
182 * Is this buffer already in vram but not at the start of it?
183 * In that case, evict it first because TTM isn't good at handling
184 * that situation.
185 */
226 if (bo->mem.mem_type == TTM_PL_VRAM && 186 if (bo->mem.mem_type == TTM_PL_VRAM &&
227 bo->mem.start < bo->num_pages && 187 bo->mem.start < bo->num_pages &&
228 bo->mem.start > 0) 188 bo->mem.start > 0)
@@ -230,8 +190,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
230 190
231 ret = ttm_bo_validate(bo, &placement, interruptible, false); 191 ret = ttm_bo_validate(bo, &placement, interruptible, false);
232 192
233 /* For some reason we didn't up at the start of vram */ 193 /* For some reason we didn't end up at the start of vram */
234 WARN_ON(ret == 0 && bo->offset != 0); 194 WARN_ON(ret == 0 && bo->offset != 0);
195 if (!ret)
196 vmw_bo_pin_reserved(buf, true);
235 197
236 ttm_bo_unreserve(bo); 198 ttm_bo_unreserve(bo);
237err_unlock: 199err_unlock:
@@ -240,13 +202,10 @@ err_unlock:
240 return ret; 202 return ret;
241} 203}
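Pinning at the start of VRAM works by shrinking the placement window: with fpfn = 0 and lpfn = num_pages, offset 0 is the only position where the buffer fits. A small sketch of that window check, using a hypothetical struct rather than the real ttm_place:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of a ttm_place page-frame window. */
struct place_window {
        unsigned long fpfn; /* first allowed page frame */
        unsigned long lpfn; /* one past the last allowed page frame */
};

/* Restricting lpfn to the buffer's own page count forces the only
 * valid placement to be offset 0, i.e. the start of VRAM. */
static bool fits(const struct place_window *w, unsigned long start,
                 unsigned long num_pages)
{
        return start >= w->fpfn && start + num_pages <= w->lpfn;
}

int main(void)
{
        unsigned long num_pages = 512;
        struct place_window w = { .fpfn = 0, .lpfn = num_pages };

        printf("%d %d\n", fits(&w, 0, num_pages),  /* 1: at offset 0 */
               fits(&w, 1, num_pages));            /* 0: anywhere else */
        return 0;
}
```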
242 204
243
244/** 205/**
245 * vmw_dmabuf_upin - Unpin the buffer given buffer, does not move the buffer. 206 * vmw_dmabuf_unpin - Unpin the given buffer without moving it.
246 * 207 *
247 * May only be called by the current master since it assumes that the 208 * This function takes the reservation_sem in write mode.
248 * master lock is the current master's lock.
249 * This function takes the master's lock in write mode.
250 * 209 *
251 * @dev_priv: Driver private. 210 * @dev_priv: Driver private.
252 * @buf: DMA buffer to unpin. 211 * @buf: DMA buffer to unpin.
@@ -259,16 +218,25 @@ int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
259 struct vmw_dma_buffer *buf, 218 struct vmw_dma_buffer *buf,
260 bool interruptible) 219 bool interruptible)
261{ 220{
262 /* 221 struct ttm_buffer_object *bo = &buf->base;
263 * We could in theory early out if the buffer is 222 int ret;
264 * unpinned but we need to lock and reserve the buffer 223
265 * anyways so we don't gain much by that. 224 ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
266 */ 225 if (unlikely(ret != 0))
267 return vmw_dmabuf_to_placement(dev_priv, buf, 226 return ret;
268 &vmw_evictable_placement,
269 interruptible);
270}
271 227
228 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
229 if (unlikely(ret != 0))
230 goto err;
231
232 vmw_bo_pin_reserved(buf, false);
233
234 ttm_bo_unreserve(bo);
235
236err:
237 ttm_read_unlock(&dev_priv->reservation_sem);
238 return ret;
239}
272 240
273/** 241/**
274 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement 242 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
@@ -291,21 +259,31 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
291 259
292 260
293/** 261/**
294 * vmw_bo_pin - Pin or unpin a buffer object without moving it. 262 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
295 * 263 *
296 * @bo: The buffer object. Must be reserved. 264 * @vbo: The buffer object. Must be reserved.
297 * @pin: Whether to pin or unpin. 265 * @pin: Whether to pin or unpin.
298 * 266 *
299 */ 267 */
300void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) 268void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
301{ 269{
302 struct ttm_place pl; 270 struct ttm_place pl;
303 struct ttm_placement placement; 271 struct ttm_placement placement;
272 struct ttm_buffer_object *bo = &vbo->base;
304 uint32_t old_mem_type = bo->mem.mem_type; 273 uint32_t old_mem_type = bo->mem.mem_type;
305 int ret; 274 int ret;
306 275
307 lockdep_assert_held(&bo->resv->lock.base); 276 lockdep_assert_held(&bo->resv->lock.base);
308 277
278 if (pin) {
279 if (vbo->pin_count++ > 0)
280 return;
281 } else {
282 WARN_ON(vbo->pin_count <= 0);
283 if (--vbo->pin_count > 0)
284 return;
285 }
286
309 pl.fpfn = 0; 287 pl.fpfn = 0;
310 pl.lpfn = 0; 288 pl.lpfn = 0;
311 pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB 289 pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
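vmw_bo_pin_reserved() now counts pins, so only the 0 -> 1 and 1 -> 0 transitions change the placement; nested pins are free. A stand-alone model of those semantics, with an invented struct and assert() in place of WARN_ON:

```c
#include <assert.h>
#include <stdio.h>

/* Model of the new pin_count semantics: only the outermost pin and the
 * last unpin touch the placement; nested pins just count. */
struct pinned_bo {
        int pin_count;
        int placement_changes; /* how often we (un)pinned for real */
};

static void bo_pin(struct pinned_bo *bo, int pin)
{
        if (pin) {
                if (bo->pin_count++ > 0)
                        return;            /* already pinned */
        } else {
                assert(bo->pin_count > 0); /* WARN_ON in the driver */
                if (--bo->pin_count > 0)
                        return;            /* still pinned by others */
        }
        bo->placement_changes++;           /* stand-in for revalidation */
}

int main(void)
{
        struct pinned_bo bo = { 0, 0 };

        bo_pin(&bo, 1);
        bo_pin(&bo, 1); /* nested: no placement change */
        bo_pin(&bo, 0);
        bo_pin(&bo, 0); /* last unpin: placement changes again */
        printf("changes=%d\n", bo.placement_changes); /* changes=2 */
        return 0;
}
```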
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 620bb5cf617c..f97ec5686cbc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,6 +28,7 @@
28 28
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30#include "vmwgfx_drv.h" 30#include "vmwgfx_drv.h"
31#include "vmwgfx_binding.h"
31#include <drm/ttm/ttm_placement.h> 32#include <drm/ttm/ttm_placement.h>
32#include <drm/ttm/ttm_bo_driver.h> 33#include <drm/ttm/ttm_bo_driver.h>
33#include <drm/ttm/ttm_object.h> 34#include <drm/ttm/ttm_object.h>
@@ -127,6 +128,9 @@
127#define DRM_IOCTL_VMW_SYNCCPU \ 128#define DRM_IOCTL_VMW_SYNCCPU \
128 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ 129 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
129 struct drm_vmw_synccpu_arg) 130 struct drm_vmw_synccpu_arg)
131#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
132 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
133 struct drm_vmw_context_arg)
130 134
131/** 135/**
132 * The core DRM version of this macro doesn't account for 136 * The core DRM version of this macro doesn't account for
@@ -168,8 +172,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
168 DRM_UNLOCKED | DRM_RENDER_ALLOW), 172 DRM_UNLOCKED | DRM_RENDER_ALLOW),
169 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, 173 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
170 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 174 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
171 VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, 175 VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED |
172 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 176 DRM_RENDER_ALLOW),
173 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, 177 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
174 DRM_UNLOCKED | DRM_RENDER_ALLOW), 178 DRM_UNLOCKED | DRM_RENDER_ALLOW),
175 VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, 179 VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
@@ -206,6 +210,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
206 VMW_IOCTL_DEF(VMW_SYNCCPU, 210 VMW_IOCTL_DEF(VMW_SYNCCPU,
207 vmw_user_dmabuf_synccpu_ioctl, 211 vmw_user_dmabuf_synccpu_ioctl,
208 DRM_UNLOCKED | DRM_RENDER_ALLOW), 212 DRM_UNLOCKED | DRM_RENDER_ALLOW),
213 VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
214 vmw_extended_context_define_ioctl,
215 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
209}; 216};
210 217
211static struct pci_device_id vmw_pci_id_list[] = { 218static struct pci_device_id vmw_pci_id_list[] = {
@@ -278,6 +285,8 @@ static void vmw_print_capabilities(uint32_t capabilities)
278 DRM_INFO(" Command Buffers 2.\n"); 285 DRM_INFO(" Command Buffers 2.\n");
279 if (capabilities & SVGA_CAP_GBOBJECTS) 286 if (capabilities & SVGA_CAP_GBOBJECTS)
280 DRM_INFO(" Guest Backed Resources.\n"); 287 DRM_INFO(" Guest Backed Resources.\n");
288 if (capabilities & SVGA_CAP_DX)
289 DRM_INFO(" DX Features.\n");
281} 290}
282 291
283/** 292/**
@@ -296,30 +305,31 @@ static void vmw_print_capabilities(uint32_t capabilities)
296static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) 305static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
297{ 306{
298 int ret; 307 int ret;
299 struct ttm_buffer_object *bo; 308 struct vmw_dma_buffer *vbo;
300 struct ttm_bo_kmap_obj map; 309 struct ttm_bo_kmap_obj map;
301 volatile SVGA3dQueryResult *result; 310 volatile SVGA3dQueryResult *result;
302 bool dummy; 311 bool dummy;
303 312
304 /* 313 /*
305 * Create the bo as pinned, so that a tryreserve will 314 * Create the vbo as pinned, so that a tryreserve will
306 * immediately succeed. This is because we're the only 315 * immediately succeed. This is because we're the only
307 * user of the bo currently. 316 * user of the bo currently.
308 */ 317 */
309 ret = ttm_bo_create(&dev_priv->bdev, 318 vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
310 PAGE_SIZE, 319 if (!vbo)
311 ttm_bo_type_device, 320 return -ENOMEM;
312 &vmw_sys_ne_placement,
313 0, false, NULL,
314 &bo);
315 321
322 ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
323 &vmw_sys_ne_placement, false,
324 &vmw_dmabuf_bo_free);
316 if (unlikely(ret != 0)) 325 if (unlikely(ret != 0))
317 return ret; 326 return ret;
318 327
319 ret = ttm_bo_reserve(bo, false, true, false, NULL); 328 ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
320 BUG_ON(ret != 0); 329 BUG_ON(ret != 0);
330 vmw_bo_pin_reserved(vbo, true);
321 331
322 ret = ttm_bo_kmap(bo, 0, 1, &map); 332 ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
323 if (likely(ret == 0)) { 333 if (likely(ret == 0)) {
324 result = ttm_kmap_obj_virtual(&map, &dummy); 334 result = ttm_kmap_obj_virtual(&map, &dummy);
325 result->totalSize = sizeof(*result); 335 result->totalSize = sizeof(*result);
@@ -327,18 +337,55 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
327 result->result32 = 0xff; 337 result->result32 = 0xff;
328 ttm_bo_kunmap(&map); 338 ttm_bo_kunmap(&map);
329 } 339 }
330 vmw_bo_pin(bo, false); 340 vmw_bo_pin_reserved(vbo, false);
331 ttm_bo_unreserve(bo); 341 ttm_bo_unreserve(&vbo->base);
332 342
333 if (unlikely(ret != 0)) { 343 if (unlikely(ret != 0)) {
334 DRM_ERROR("Dummy query buffer map failed.\n"); 344 DRM_ERROR("Dummy query buffer map failed.\n");
335 ttm_bo_unref(&bo); 345 vmw_dmabuf_unreference(&vbo);
336 } else 346 } else
337 dev_priv->dummy_query_bo = bo; 347 dev_priv->dummy_query_bo = vbo;
338 348
339 return ret; 349 return ret;
340} 350}
341 351
352/**
353 * vmw_request_device_late - Perform late device setup
354 *
355 * @dev_priv: Pointer to device private.
356 *
357 * This function sets up otables and enables large command buffer
358 * submission. These tasks are split out into a separate function because
359 * it undoes vmw_release_device_early and is intended for use by an error
360 * path in the hibernation code.
361 */
362static int vmw_request_device_late(struct vmw_private *dev_priv)
363{
364 int ret;
365
366 if (dev_priv->has_mob) {
367 ret = vmw_otables_setup(dev_priv);
368 if (unlikely(ret != 0)) {
369 DRM_ERROR("Unable to initialize "
370 "guest Memory OBjects.\n");
371 return ret;
372 }
373 }
374
375 if (dev_priv->cman) {
376 ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
377 256*4096, 2*4096);
378 if (ret) {
379 struct vmw_cmdbuf_man *man = dev_priv->cman;
380
381 dev_priv->cman = NULL;
382 vmw_cmdbuf_man_destroy(man);
383 }
384 }
385
386 return 0;
387}
388
342static int vmw_request_device(struct vmw_private *dev_priv) 389static int vmw_request_device(struct vmw_private *dev_priv)
343{ 390{
344 int ret; 391 int ret;
@@ -349,14 +396,16 @@ static int vmw_request_device(struct vmw_private *dev_priv)
349 return ret; 396 return ret;
350 } 397 }
351 vmw_fence_fifo_up(dev_priv->fman); 398 vmw_fence_fifo_up(dev_priv->fman);
352 if (dev_priv->has_mob) { 399 dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
353 ret = vmw_otables_setup(dev_priv); 400 if (IS_ERR(dev_priv->cman)) {
354 if (unlikely(ret != 0)) { 401 dev_priv->cman = NULL;
355 DRM_ERROR("Unable to initialize " 402 dev_priv->has_dx = false;
356 "guest Memory OBjects.\n");
357 goto out_no_mob;
358 }
359 } 403 }
404
405 ret = vmw_request_device_late(dev_priv);
406 if (ret)
407 goto out_no_mob;
408
360 ret = vmw_dummy_query_bo_create(dev_priv); 409 ret = vmw_dummy_query_bo_create(dev_priv);
361 if (unlikely(ret != 0)) 410 if (unlikely(ret != 0))
362 goto out_no_query_bo; 411 goto out_no_query_bo;
@@ -364,15 +413,29 @@ static int vmw_request_device(struct vmw_private *dev_priv)
364 return 0; 413 return 0;
365 414
366out_no_query_bo: 415out_no_query_bo:
367 if (dev_priv->has_mob) 416 if (dev_priv->cman)
417 vmw_cmdbuf_remove_pool(dev_priv->cman);
418 if (dev_priv->has_mob) {
419 (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
368 vmw_otables_takedown(dev_priv); 420 vmw_otables_takedown(dev_priv);
421 }
422 if (dev_priv->cman)
423 vmw_cmdbuf_man_destroy(dev_priv->cman);
369out_no_mob: 424out_no_mob:
370 vmw_fence_fifo_down(dev_priv->fman); 425 vmw_fence_fifo_down(dev_priv->fman);
371 vmw_fifo_release(dev_priv, &dev_priv->fifo); 426 vmw_fifo_release(dev_priv, &dev_priv->fifo);
372 return ret; 427 return ret;
373} 428}
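The reworked error path in vmw_request_device() follows the usual kernel goto-unwind pattern: each label undoes only the steps that had already succeeded when the failure hit. A minimal sketch, using malloc/free as stand-ins for the driver resources:

```c
#include <stdio.h>
#include <stdlib.h>

/* Each cleanup label releases exactly what was acquired before it. */
static int request_device_like(int fail_at)
{
        char *fifo, *cman, *query_bo;

        fifo = (fail_at == 1) ? NULL : malloc(16);
        if (!fifo)
                goto out_no_fifo;
        cman = (fail_at == 2) ? NULL : malloc(16);
        if (!cman)
                goto out_no_cman;
        query_bo = (fail_at == 3) ? NULL : malloc(16);
        if (!query_bo)
                goto out_no_query_bo;

        /* Success: in the driver these stay live; freed here for the demo. */
        free(query_bo);
        free(cman);
        free(fifo);
        return 0;

out_no_query_bo:
        free(cman);
out_no_cman:
        free(fifo);
out_no_fifo:
        return -1;
}

int main(void)
{
        for (int i = 0; i <= 3; i++)
                printf("fail_at=%d -> %d\n", i, request_device_like(i));
        return 0;
}
```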
374 429
375static void vmw_release_device(struct vmw_private *dev_priv) 430/**
431 * vmw_release_device_early - Early part of fifo takedown.
432 *
433 * @dev_priv: Pointer to device private struct.
434 *
435 * This is the first part of command submission takedown, to be called before
436 * buffer management is taken down.
437 */
438static void vmw_release_device_early(struct vmw_private *dev_priv)
376{ 439{
377 /* 440 /*
378 * Previous destructions should've released 441 * Previous destructions should've released
@@ -381,65 +444,31 @@ static void vmw_release_device(struct vmw_private *dev_priv)
381 444
382 BUG_ON(dev_priv->pinned_bo != NULL); 445 BUG_ON(dev_priv->pinned_bo != NULL);
383 446
384 ttm_bo_unref(&dev_priv->dummy_query_bo); 447 vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
385 if (dev_priv->has_mob) 448 if (dev_priv->cman)
386 vmw_otables_takedown(dev_priv); 449 vmw_cmdbuf_remove_pool(dev_priv->cman);
387 vmw_fence_fifo_down(dev_priv->fman);
388 vmw_fifo_release(dev_priv, &dev_priv->fifo);
389}
390
391
392/**
393 * Increase the 3d resource refcount.
394 * If the count was prevously zero, initialize the fifo, switching to svga
395 * mode. Note that the master holds a ref as well, and may request an
396 * explicit switch to svga mode if fb is not running, using @unhide_svga.
397 */
398int vmw_3d_resource_inc(struct vmw_private *dev_priv,
399 bool unhide_svga)
400{
401 int ret = 0;
402 450
403 mutex_lock(&dev_priv->release_mutex); 451 if (dev_priv->has_mob) {
404 if (unlikely(dev_priv->num_3d_resources++ == 0)) { 452 ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
405 ret = vmw_request_device(dev_priv); 453 vmw_otables_takedown(dev_priv);
406 if (unlikely(ret != 0))
407 --dev_priv->num_3d_resources;
408 } else if (unhide_svga) {
409 vmw_write(dev_priv, SVGA_REG_ENABLE,
410 vmw_read(dev_priv, SVGA_REG_ENABLE) &
411 ~SVGA_REG_ENABLE_HIDE);
412 } 454 }
413
414 mutex_unlock(&dev_priv->release_mutex);
415 return ret;
416} 455}
417 456
418/** 457/**
419 * Decrease the 3d resource refcount. 458 * vmw_release_device_late - Late part of fifo takedown.
420 * If the count reaches zero, disable the fifo, switching to vga mode. 459 *
421 * Note that the master holds a refcount as well, and may request an 460 * @dev_priv: Pointer to device private struct.
422 * explicit switch to vga mode when it releases its refcount to account 461 *
423 * for the situation of an X server vt switch to VGA with 3d resources 462 * This is the last part of the command submission takedown, to be called when
424 * active. 463 * command submission is no longer needed. It may wait on pending fences.
425 */ 464 */
426void vmw_3d_resource_dec(struct vmw_private *dev_priv, 465static void vmw_release_device_late(struct vmw_private *dev_priv)
427 bool hide_svga)
428{ 466{
429 int32_t n3d; 467 vmw_fence_fifo_down(dev_priv->fman);
430 468 if (dev_priv->cman)
431 mutex_lock(&dev_priv->release_mutex); 469 vmw_cmdbuf_man_destroy(dev_priv->cman);
432 if (unlikely(--dev_priv->num_3d_resources == 0))
433 vmw_release_device(dev_priv);
434 else if (hide_svga)
435 vmw_write(dev_priv, SVGA_REG_ENABLE,
436 vmw_read(dev_priv, SVGA_REG_ENABLE) |
437 SVGA_REG_ENABLE_HIDE);
438
439 n3d = (int32_t) dev_priv->num_3d_resources;
440 mutex_unlock(&dev_priv->release_mutex);
441 470
442 BUG_ON(n3d < 0); 471 vmw_fifo_release(dev_priv, &dev_priv->fifo);
443} 472}
444 473
445/** 474/**
@@ -603,6 +632,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
603 spin_lock_init(&dev_priv->hw_lock); 632 spin_lock_init(&dev_priv->hw_lock);
604 spin_lock_init(&dev_priv->waiter_lock); 633 spin_lock_init(&dev_priv->waiter_lock);
605 spin_lock_init(&dev_priv->cap_lock); 634 spin_lock_init(&dev_priv->cap_lock);
635 spin_lock_init(&dev_priv->svga_lock);
606 636
607 for (i = vmw_res_context; i < vmw_res_max; ++i) { 637 for (i = vmw_res_context; i < vmw_res_max; ++i) {
608 idr_init(&dev_priv->res_idr[i]); 638 idr_init(&dev_priv->res_idr[i]);
@@ -673,22 +703,31 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
673 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); 703 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
674 dev_priv->max_mob_size = 704 dev_priv->max_mob_size =
675 vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); 705 vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
676 } else 706 dev_priv->stdu_max_width =
707 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
708 dev_priv->stdu_max_height =
709 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
710
711 vmw_write(dev_priv, SVGA_REG_DEV_CAP,
712 SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
713 dev_priv->texture_max_width = vmw_read(dev_priv,
714 SVGA_REG_DEV_CAP);
715 vmw_write(dev_priv, SVGA_REG_DEV_CAP,
716 SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
717 dev_priv->texture_max_height = vmw_read(dev_priv,
718 SVGA_REG_DEV_CAP);
719 } else {
720 dev_priv->texture_max_width = 8192;
721 dev_priv->texture_max_height = 8192;
677 dev_priv->prim_bb_mem = dev_priv->vram_size; 722 dev_priv->prim_bb_mem = dev_priv->vram_size;
723 }
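Reading the texture limits goes through an index/data register pair: write the capability index to SVGA_REG_DEV_CAP, then read the same register back for the value. A toy model of that handshake, with a fabricated capability table and indices:

```c
#include <stdio.h>

/* Hypothetical index/data register pair, mirroring the
 * SVGA_REG_DEV_CAP write-then-read handshake above. */
static unsigned int devcaps[64] = { [10] = 16384, [11] = 16384 };
static unsigned int selected;

static void reg_write(unsigned int index) { selected = index; }
static unsigned int reg_read(void)        { return devcaps[selected]; }

int main(void)
{
        /* Select a capability, then read its value back. */
        reg_write(10);
        printf("max texture width: %u\n", reg_read());
        reg_write(11);
        printf("max texture height: %u\n", reg_read());
        return 0;
}
```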
724
725 vmw_print_capabilities(dev_priv->capabilities);
678 726
679 ret = vmw_dma_masks(dev_priv); 727 ret = vmw_dma_masks(dev_priv);
680 if (unlikely(ret != 0)) 728 if (unlikely(ret != 0))
681 goto out_err0; 729 goto out_err0;
682 730
683 /*
684 * Limit back buffer size to VRAM size. Remove this once
685 * screen targets are implemented.
686 */
687 if (dev_priv->prim_bb_mem > dev_priv->vram_size)
688 dev_priv->prim_bb_mem = dev_priv->vram_size;
689
690 vmw_print_capabilities(dev_priv->capabilities);
691
692 if (dev_priv->capabilities & SVGA_CAP_GMR2) { 731 if (dev_priv->capabilities & SVGA_CAP_GMR2) {
693 DRM_INFO("Max GMR ids is %u\n", 732 DRM_INFO("Max GMR ids is %u\n",
694 (unsigned)dev_priv->max_gmr_ids); 733 (unsigned)dev_priv->max_gmr_ids);
@@ -714,17 +753,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
714 dev_priv->active_master = &dev_priv->fbdev_master; 753 dev_priv->active_master = &dev_priv->fbdev_master;
715 754
716 755
717 ret = ttm_bo_device_init(&dev_priv->bdev,
718 dev_priv->bo_global_ref.ref.object,
719 &vmw_bo_driver,
720 dev->anon_inode->i_mapping,
721 VMWGFX_FILE_PAGE_OFFSET,
722 false);
723 if (unlikely(ret != 0)) {
724 DRM_ERROR("Failed initializing TTM buffer object driver.\n");
725 goto out_err1;
726 }
727
728 dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, 756 dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
729 dev_priv->mmio_size); 757 dev_priv->mmio_size);
730 758
@@ -787,13 +815,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
787 goto out_no_fman; 815 goto out_no_fman;
788 } 816 }
789 817
818 ret = ttm_bo_device_init(&dev_priv->bdev,
819 dev_priv->bo_global_ref.ref.object,
820 &vmw_bo_driver,
821 dev->anon_inode->i_mapping,
822 VMWGFX_FILE_PAGE_OFFSET,
823 false);
824 if (unlikely(ret != 0)) {
825 DRM_ERROR("Failed initializing TTM buffer object driver.\n");
826 goto out_no_bdev;
827 }
790 828
829 /*
830 * Enable VRAM, but initially don't use it until SVGA is enabled and
831 * unhidden.
832 */
791 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, 833 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
792 (dev_priv->vram_size >> PAGE_SHIFT)); 834 (dev_priv->vram_size >> PAGE_SHIFT));
793 if (unlikely(ret != 0)) { 835 if (unlikely(ret != 0)) {
794 DRM_ERROR("Failed initializing memory manager for VRAM.\n"); 836 DRM_ERROR("Failed initializing memory manager for VRAM.\n");
795 goto out_no_vram; 837 goto out_no_vram;
796 } 838 }
839 dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
797 840
798 dev_priv->has_gmr = true; 841 dev_priv->has_gmr = true;
799 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || 842 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
@@ -814,18 +857,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
814 } 857 }
815 } 858 }
816 859
817 vmw_kms_save_vga(dev_priv); 860 if (dev_priv->has_mob) {
861 spin_lock(&dev_priv->cap_lock);
862 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
863 dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
864 spin_unlock(&dev_priv->cap_lock);
865 }
866
818 867
819 /* Start kms and overlay systems, needs fifo. */
820 ret = vmw_kms_init(dev_priv); 868 ret = vmw_kms_init(dev_priv);
821 if (unlikely(ret != 0)) 869 if (unlikely(ret != 0))
822 goto out_no_kms; 870 goto out_no_kms;
823 vmw_overlay_init(dev_priv); 871 vmw_overlay_init(dev_priv);
824 872
873 ret = vmw_request_device(dev_priv);
874 if (ret)
875 goto out_no_fifo;
876
877 DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
878
825 if (dev_priv->enable_fb) { 879 if (dev_priv->enable_fb) {
826 ret = vmw_3d_resource_inc(dev_priv, true); 880 vmw_fifo_resource_inc(dev_priv);
827 if (unlikely(ret != 0)) 881 vmw_svga_enable(dev_priv);
828 goto out_no_fifo;
829 vmw_fb_init(dev_priv); 882 vmw_fb_init(dev_priv);
830 } 883 }
831 884
@@ -838,13 +891,14 @@ out_no_fifo:
838 vmw_overlay_close(dev_priv); 891 vmw_overlay_close(dev_priv);
839 vmw_kms_close(dev_priv); 892 vmw_kms_close(dev_priv);
840out_no_kms: 893out_no_kms:
841 vmw_kms_restore_vga(dev_priv);
842 if (dev_priv->has_mob) 894 if (dev_priv->has_mob)
843 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); 895 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
844 if (dev_priv->has_gmr) 896 if (dev_priv->has_gmr)
845 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); 897 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
846 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); 898 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
847out_no_vram: 899out_no_vram:
900 (void)ttm_bo_device_release(&dev_priv->bdev);
901out_no_bdev:
848 vmw_fence_manager_takedown(dev_priv->fman); 902 vmw_fence_manager_takedown(dev_priv->fman);
849out_no_fman: 903out_no_fman:
850 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 904 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -860,13 +914,13 @@ out_err4:
860 iounmap(dev_priv->mmio_virt); 914 iounmap(dev_priv->mmio_virt);
861out_err3: 915out_err3:
862 arch_phys_wc_del(dev_priv->mmio_mtrr); 916 arch_phys_wc_del(dev_priv->mmio_mtrr);
863 (void)ttm_bo_device_release(&dev_priv->bdev);
864out_err1:
865 vmw_ttm_global_release(dev_priv); 917 vmw_ttm_global_release(dev_priv);
866out_err0: 918out_err0:
867 for (i = vmw_res_context; i < vmw_res_max; ++i) 919 for (i = vmw_res_context; i < vmw_res_max; ++i)
868 idr_destroy(&dev_priv->res_idr[i]); 920 idr_destroy(&dev_priv->res_idr[i]);
869 921
922 if (dev_priv->ctx.staged_bindings)
923 vmw_binding_state_free(dev_priv->ctx.staged_bindings);
870 kfree(dev_priv); 924 kfree(dev_priv);
871 return ret; 925 return ret;
872} 926}
@@ -882,19 +936,24 @@ static int vmw_driver_unload(struct drm_device *dev)
882 drm_ht_remove(&dev_priv->ctx.res_ht); 936 drm_ht_remove(&dev_priv->ctx.res_ht);
883 vfree(dev_priv->ctx.cmd_bounce); 937 vfree(dev_priv->ctx.cmd_bounce);
884 if (dev_priv->enable_fb) { 938 if (dev_priv->enable_fb) {
939 vmw_fb_off(dev_priv);
885 vmw_fb_close(dev_priv); 940 vmw_fb_close(dev_priv);
886 vmw_kms_restore_vga(dev_priv); 941 vmw_fifo_resource_dec(dev_priv);
887 vmw_3d_resource_dec(dev_priv, false); 942 vmw_svga_disable(dev_priv);
888 } 943 }
944
889 vmw_kms_close(dev_priv); 945 vmw_kms_close(dev_priv);
890 vmw_overlay_close(dev_priv); 946 vmw_overlay_close(dev_priv);
891 947
892 if (dev_priv->has_mob)
893 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
894 if (dev_priv->has_gmr) 948 if (dev_priv->has_gmr)
895 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); 949 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
896 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); 950 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
897 951
952 vmw_release_device_early(dev_priv);
953 if (dev_priv->has_mob)
954 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
955 (void) ttm_bo_device_release(&dev_priv->bdev);
956 vmw_release_device_late(dev_priv);
898 vmw_fence_manager_takedown(dev_priv->fman); 957 vmw_fence_manager_takedown(dev_priv->fman);
899 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 958 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
900 drm_irq_uninstall(dev_priv->dev); 959 drm_irq_uninstall(dev_priv->dev);
@@ -907,6 +966,8 @@ static int vmw_driver_unload(struct drm_device *dev)
907 iounmap(dev_priv->mmio_virt); 966 iounmap(dev_priv->mmio_virt);
908 arch_phys_wc_del(dev_priv->mmio_mtrr); 967 arch_phys_wc_del(dev_priv->mmio_mtrr);
909 (void)ttm_bo_device_release(&dev_priv->bdev); 968 (void)ttm_bo_device_release(&dev_priv->bdev);
969 if (dev_priv->ctx.staged_bindings)
970 vmw_binding_state_free(dev_priv->ctx.staged_bindings);
910 vmw_ttm_global_release(dev_priv); 971 vmw_ttm_global_release(dev_priv);
911 972
912 for (i = vmw_res_context; i < vmw_res_max; ++i) 973 for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -1044,17 +1105,27 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
1044 const struct drm_ioctl_desc *ioctl = 1105 const struct drm_ioctl_desc *ioctl =
1045 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 1106 &vmw_ioctls[nr - DRM_COMMAND_BASE];
1046 1107
1047 if (unlikely(ioctl->cmd != cmd)) { 1108 if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
1048 DRM_ERROR("Invalid command format, ioctl %d\n", 1109 ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
1049 nr - DRM_COMMAND_BASE); 1110 if (unlikely(ret != 0))
1050 return -EINVAL; 1111 return ret;
1112
1113 if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
1114 goto out_io_encoding;
1115
1116 return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
1117 _IOC_SIZE(cmd));
1051 } 1118 }
1119
1120 if (unlikely(ioctl->cmd != cmd))
1121 goto out_io_encoding;
1122
1052 flags = ioctl->flags; 1123 flags = ioctl->flags;
1053 } else if (!drm_ioctl_flags(nr, &flags)) 1124 } else if (!drm_ioctl_flags(nr, &flags))
1054 return -EINVAL; 1125 return -EINVAL;
1055 1126
1056 vmaster = vmw_master_check(dev, file_priv, flags); 1127 vmaster = vmw_master_check(dev, file_priv, flags);
1057 if (unlikely(IS_ERR(vmaster))) { 1128 if (IS_ERR(vmaster)) {
1058 ret = PTR_ERR(vmaster); 1129 ret = PTR_ERR(vmaster);
1059 1130
1060 if (ret != -ERESTARTSYS) 1131 if (ret != -ERESTARTSYS)
@@ -1068,6 +1139,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
1068 ttm_read_unlock(&vmaster->lock); 1139 ttm_read_unlock(&vmaster->lock);
1069 1140
1070 return ret; 1141 return ret;
1142
1143out_io_encoding:
1144 DRM_ERROR("Invalid command format, ioctl %d\n",
1145 nr - DRM_COMMAND_BASE);
1146
1147 return -EINVAL;
1071} 1148}
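For VMW_EXECBUF the generic ioctl path now checks only the direction bits of the command encoding and takes the argument size from _IOC_SIZE(), so older, shorter argument layouts keep working. A user-space sketch of that encoding check, with a hypothetical command number (Linux headers only):

```c
#include <stdio.h>
#include <linux/ioctl.h> /* _IOW, _IOC_SIZE, IOC_IN, IOC_OUT */

struct demo_arg { int a; int b; };               /* hypothetical argument */
#define DEMO_IOCTL _IOW('V', 1, struct demo_arg) /* hypothetical command */

int main(void)
{
        unsigned int cmd = DEMO_IOCTL;

        /* Accept only write-only encodings, then trust _IOC_SIZE() for
         * the argument size, as the execbuf special case above does. */
        if ((cmd & (IOC_IN | IOC_OUT)) != IOC_IN)
                printf("bad encoding\n");
        else
                printf("arg size: %u bytes\n", (unsigned)_IOC_SIZE(cmd));
        return 0;
}
```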
1072 1149
1073static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, 1150static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
@@ -1086,30 +1163,11 @@ static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
1086 1163
1087static void vmw_lastclose(struct drm_device *dev) 1164static void vmw_lastclose(struct drm_device *dev)
1088{ 1165{
1089 struct drm_crtc *crtc;
1090 struct drm_mode_set set;
1091 int ret;
1092
1093 set.x = 0;
1094 set.y = 0;
1095 set.fb = NULL;
1096 set.mode = NULL;
1097 set.connectors = NULL;
1098 set.num_connectors = 0;
1099
1100 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1101 set.crtc = crtc;
1102 ret = drm_mode_set_config_internal(&set);
1103 WARN_ON(ret != 0);
1104 }
1105
1106} 1166}
1107 1167
1108static void vmw_master_init(struct vmw_master *vmaster) 1168static void vmw_master_init(struct vmw_master *vmaster)
1109{ 1169{
1110 ttm_lock_init(&vmaster->lock); 1170 ttm_lock_init(&vmaster->lock);
1111 INIT_LIST_HEAD(&vmaster->fb_surf);
1112 mutex_init(&vmaster->fb_surf_mutex);
1113} 1171}
1114 1172
1115static int vmw_master_create(struct drm_device *dev, 1173static int vmw_master_create(struct drm_device *dev,
@@ -1137,7 +1195,6 @@ static void vmw_master_destroy(struct drm_device *dev,
1137 kfree(vmaster); 1195 kfree(vmaster);
1138} 1196}
1139 1197
1140
1141static int vmw_master_set(struct drm_device *dev, 1198static int vmw_master_set(struct drm_device *dev,
1142 struct drm_file *file_priv, 1199 struct drm_file *file_priv,
1143 bool from_open) 1200 bool from_open)
@@ -1148,27 +1205,13 @@ static int vmw_master_set(struct drm_device *dev,
1148 struct vmw_master *vmaster = vmw_master(file_priv->master); 1205 struct vmw_master *vmaster = vmw_master(file_priv->master);
1149 int ret = 0; 1206 int ret = 0;
1150 1207
1151 if (!dev_priv->enable_fb) {
1152 ret = vmw_3d_resource_inc(dev_priv, true);
1153 if (unlikely(ret != 0))
1154 return ret;
1155 vmw_kms_save_vga(dev_priv);
1156 vmw_write(dev_priv, SVGA_REG_TRACES, 0);
1157 }
1158
1159 if (active) { 1208 if (active) {
1160 BUG_ON(active != &dev_priv->fbdev_master); 1209 BUG_ON(active != &dev_priv->fbdev_master);
1161 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); 1210 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
1162 if (unlikely(ret != 0)) 1211 if (unlikely(ret != 0))
1163 goto out_no_active_lock; 1212 return ret;
1164 1213
1165 ttm_lock_set_kill(&active->lock, true, SIGTERM); 1214 ttm_lock_set_kill(&active->lock, true, SIGTERM);
1166 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
1167 if (unlikely(ret != 0)) {
1168 DRM_ERROR("Unable to clean VRAM on "
1169 "master drop.\n");
1170 }
1171
1172 dev_priv->active_master = NULL; 1215 dev_priv->active_master = NULL;
1173 } 1216 }
1174 1217
@@ -1182,14 +1225,6 @@ static int vmw_master_set(struct drm_device *dev,
1182 dev_priv->active_master = vmaster; 1225 dev_priv->active_master = vmaster;
1183 1226
1184 return 0; 1227 return 0;
1185
1186out_no_active_lock:
1187 if (!dev_priv->enable_fb) {
1188 vmw_kms_restore_vga(dev_priv);
1189 vmw_3d_resource_dec(dev_priv, true);
1190 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
1191 }
1192 return ret;
1193} 1228}
1194 1229
1195static void vmw_master_drop(struct drm_device *dev, 1230static void vmw_master_drop(struct drm_device *dev,
@@ -1214,16 +1249,9 @@ static void vmw_master_drop(struct drm_device *dev,
1214 } 1249 }
1215 1250
1216 ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); 1251 ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
1217 vmw_execbuf_release_pinned_bo(dev_priv);
1218 1252
1219 if (!dev_priv->enable_fb) { 1253 if (!dev_priv->enable_fb)
1220 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); 1254 vmw_svga_disable(dev_priv);
1221 if (unlikely(ret != 0))
1222 DRM_ERROR("Unable to clean VRAM on master drop.\n");
1223 vmw_kms_restore_vga(dev_priv);
1224 vmw_3d_resource_dec(dev_priv, true);
1225 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
1226 }
1227 1255
1228 dev_priv->active_master = &dev_priv->fbdev_master; 1256 dev_priv->active_master = &dev_priv->fbdev_master;
1229 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 1257 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
@@ -1233,6 +1261,76 @@ static void vmw_master_drop(struct drm_device *dev,
1233 vmw_fb_on(dev_priv); 1261 vmw_fb_on(dev_priv);
1234} 1262}
1235 1263
1264/**
1265 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1266 *
1267 * @dev_priv: Pointer to device private struct.
1268 * Needs the reservation sem to be held in non-exclusive mode.
1269 */
1270static void __vmw_svga_enable(struct vmw_private *dev_priv)
1271{
1272 spin_lock(&dev_priv->svga_lock);
1273 if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1274 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
1275 dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
1276 }
1277 spin_unlock(&dev_priv->svga_lock);
1278}
1279
1280/**
1281 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1282 *
1283 * @dev_priv: Pointer to device private struct.
1284 */
1285void vmw_svga_enable(struct vmw_private *dev_priv)
1286{
1287 ttm_read_lock(&dev_priv->reservation_sem, false);
1288 __vmw_svga_enable(dev_priv);
1289 ttm_read_unlock(&dev_priv->reservation_sem);
1290}
1291
1292/**
1293 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
1294 *
1295 * @dev_priv: Pointer to device private struct.
1296 * Needs the reservation sem to be held in exclusive mode.
1297 * Will not empty VRAM. VRAM must be emptied by the caller.
1298 */
1299static void __vmw_svga_disable(struct vmw_private *dev_priv)
1300{
1301 spin_lock(&dev_priv->svga_lock);
1302 if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1303 dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
1304 vmw_write(dev_priv, SVGA_REG_ENABLE,
1305 SVGA_REG_ENABLE_HIDE |
1306 SVGA_REG_ENABLE_ENABLE);
1307 }
1308 spin_unlock(&dev_priv->svga_lock);
1309}
1310
1311/**
1312 * vmw_svga_disable - Disable SVGA mode and use of VRAM, keeping the fifo
1313 * running.
1314 *
1315 * @dev_priv: Pointer to device private struct.
1316 * Will empty VRAM.
1317 */
1318void vmw_svga_disable(struct vmw_private *dev_priv)
1319{
1320 ttm_write_lock(&dev_priv->reservation_sem, false);
1321 spin_lock(&dev_priv->svga_lock);
1322 if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1323 dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
1324 spin_unlock(&dev_priv->svga_lock);
1325 if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
1326 DRM_ERROR("Failed evicting VRAM buffers.\n");
1327 vmw_write(dev_priv, SVGA_REG_ENABLE,
1328 SVGA_REG_ENABLE_HIDE |
1329 SVGA_REG_ENABLE_ENABLE);
1330 } else
1331 spin_unlock(&dev_priv->svga_lock);
1332 ttm_write_unlock(&dev_priv->reservation_sem);
1333}
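vmw_svga_disable() drops the spinlock before calling ttm_bo_evict_mm(), since eviction can sleep and a spinlock must not be held across a blocking call; flipping the use_type flag under the lock also keeps the disable idempotent. A pthread-based sketch of that lock-drop-then-block shape, with a mutex standing in for the spinlock:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool vram_in_use = true;

/* Stand-in for the blocking eviction: must not run under the lock. */
static void evict_vram(void)
{
        printf("evicting (may sleep)...\n");
}

/* Flip the flag under the lock, then drop the lock *before* the
 * blocking call; a second caller sees the flag already cleared. */
static void svga_disable(void)
{
        pthread_mutex_lock(&lock);
        if (vram_in_use) {
                vram_in_use = false;
                pthread_mutex_unlock(&lock);
                evict_vram();
                return;
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        svga_disable();
        svga_disable(); /* second call: nothing to do */
        return 0;
}
```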
1236 1334
1237static void vmw_remove(struct pci_dev *pdev) 1335static void vmw_remove(struct pci_dev *pdev)
1238{ 1336{
@@ -1250,23 +1348,26 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
1250 1348
1251 switch (val) { 1349 switch (val) {
1252 case PM_HIBERNATION_PREPARE: 1350 case PM_HIBERNATION_PREPARE:
1253 case PM_SUSPEND_PREPARE: 1351 if (dev_priv->enable_fb)
1352 vmw_fb_off(dev_priv);
1254 ttm_suspend_lock(&dev_priv->reservation_sem); 1353 ttm_suspend_lock(&dev_priv->reservation_sem);
1255 1354
1256 /** 1355 /*
1257 * This empties VRAM and unbinds all GMR bindings. 1356 * This empties VRAM and unbinds all GMR bindings.
1258 * Buffer contents are moved to swappable memory. 1357 * Buffer contents are moved to swappable memory.
1259 */ 1358 */
1260 vmw_execbuf_release_pinned_bo(dev_priv); 1359 vmw_execbuf_release_pinned_bo(dev_priv);
1261 vmw_resource_evict_all(dev_priv); 1360 vmw_resource_evict_all(dev_priv);
1361 vmw_release_device_early(dev_priv);
1262 ttm_bo_swapout_all(&dev_priv->bdev); 1362 ttm_bo_swapout_all(&dev_priv->bdev);
1263 1363 vmw_fence_fifo_down(dev_priv->fman);
1264 break; 1364 break;
1265 case PM_POST_HIBERNATION: 1365 case PM_POST_HIBERNATION:
1266 case PM_POST_SUSPEND:
1267 case PM_POST_RESTORE: 1366 case PM_POST_RESTORE:
1367 vmw_fence_fifo_up(dev_priv->fman);
1268 ttm_suspend_unlock(&dev_priv->reservation_sem); 1368 ttm_suspend_unlock(&dev_priv->reservation_sem);
1269 1369 if (dev_priv->enable_fb)
1370 vmw_fb_on(dev_priv);
1270 break; 1371 break;
1271 case PM_RESTORE_PREPARE: 1372 case PM_RESTORE_PREPARE:
1272 break; 1373 break;
@@ -1276,20 +1377,13 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
1276 return 0; 1377 return 0;
1277} 1378}
1278 1379
1279/**
1280 * These might not be needed with the virtual SVGA device.
1281 */
1282
1283static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) 1380static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1284{ 1381{
1285 struct drm_device *dev = pci_get_drvdata(pdev); 1382 struct drm_device *dev = pci_get_drvdata(pdev);
1286 struct vmw_private *dev_priv = vmw_priv(dev); 1383 struct vmw_private *dev_priv = vmw_priv(dev);
1287 1384
1288 if (dev_priv->num_3d_resources != 0) { 1385 if (dev_priv->refuse_hibernation)
1289 DRM_INFO("Can't suspend or hibernate "
1290 "while 3D resources are active.\n");
1291 return -EBUSY; 1386 return -EBUSY;
1292 }
1293 1387
1294 pci_save_state(pdev); 1388 pci_save_state(pdev);
1295 pci_disable_device(pdev); 1389 pci_disable_device(pdev);
@@ -1321,56 +1415,62 @@ static int vmw_pm_resume(struct device *kdev)
1321 return vmw_pci_resume(pdev); 1415 return vmw_pci_resume(pdev);
1322} 1416}
1323 1417
1324static int vmw_pm_prepare(struct device *kdev) 1418static int vmw_pm_freeze(struct device *kdev)
1325{ 1419{
1326 struct pci_dev *pdev = to_pci_dev(kdev); 1420 struct pci_dev *pdev = to_pci_dev(kdev);
1327 struct drm_device *dev = pci_get_drvdata(pdev); 1421 struct drm_device *dev = pci_get_drvdata(pdev);
1328 struct vmw_private *dev_priv = vmw_priv(dev); 1422 struct vmw_private *dev_priv = vmw_priv(dev);
1329 1423
1330 /**
1331 * Release 3d reference held by fbdev and potentially
1332 * stop fifo.
1333 */
1334 dev_priv->suspended = true; 1424 dev_priv->suspended = true;
1335 if (dev_priv->enable_fb) 1425 if (dev_priv->enable_fb)
1336 vmw_3d_resource_dec(dev_priv, true); 1426 vmw_fifo_resource_dec(dev_priv);
1337
1338 if (dev_priv->num_3d_resources != 0) {
1339
1340 DRM_INFO("Can't suspend or hibernate "
1341 "while 3D resources are active.\n");
1342 1427
1428 if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
1429 DRM_ERROR("Can't hibernate while 3D resources are active.\n");
1343 if (dev_priv->enable_fb) 1430 if (dev_priv->enable_fb)
1344 vmw_3d_resource_inc(dev_priv, true); 1431 vmw_fifo_resource_inc(dev_priv);
1432 WARN_ON(vmw_request_device_late(dev_priv));
1345 dev_priv->suspended = false; 1433 dev_priv->suspended = false;
1346 return -EBUSY; 1434 return -EBUSY;
1347 } 1435 }
1348 1436
1437 if (dev_priv->enable_fb)
1438 __vmw_svga_disable(dev_priv);
1439
1440 vmw_release_device_late(dev_priv);
1441
1349 return 0; 1442 return 0;
1350} 1443}
1351 1444
1352static void vmw_pm_complete(struct device *kdev) 1445static int vmw_pm_restore(struct device *kdev)
1353{ 1446{
1354 struct pci_dev *pdev = to_pci_dev(kdev); 1447 struct pci_dev *pdev = to_pci_dev(kdev);
1355 struct drm_device *dev = pci_get_drvdata(pdev); 1448 struct drm_device *dev = pci_get_drvdata(pdev);
1356 struct vmw_private *dev_priv = vmw_priv(dev); 1449 struct vmw_private *dev_priv = vmw_priv(dev);
1450 int ret;
1357 1451
1358 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 1452 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
1359 (void) vmw_read(dev_priv, SVGA_REG_ID); 1453 (void) vmw_read(dev_priv, SVGA_REG_ID);
1360 1454
1361 /**
1362 * Reclaim 3d reference held by fbdev and potentially
1363 * start fifo.
1364 */
1365 if (dev_priv->enable_fb) 1455 if (dev_priv->enable_fb)
1366 vmw_3d_resource_inc(dev_priv, false); 1456 vmw_fifo_resource_inc(dev_priv);
1457
1458 ret = vmw_request_device(dev_priv);
1459 if (ret)
1460 return ret;
1461
1462 if (dev_priv->enable_fb)
1463 __vmw_svga_enable(dev_priv);
1367 1464
1368 dev_priv->suspended = false; 1465 dev_priv->suspended = false;
1466
1467 return 0;
1369} 1468}
1370 1469
1371static const struct dev_pm_ops vmw_pm_ops = { 1470static const struct dev_pm_ops vmw_pm_ops = {
1372 .prepare = vmw_pm_prepare, 1471 .freeze = vmw_pm_freeze,
1373 .complete = vmw_pm_complete, 1472 .thaw = vmw_pm_restore,
1473 .restore = vmw_pm_restore,
1374 .suspend = vmw_pm_suspend, 1474 .suspend = vmw_pm_suspend,
1375 .resume = vmw_pm_resume, 1475 .resume = vmw_pm_resume,
1376}; 1476};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d26a6daa9719..8f40692cf48a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -40,17 +40,17 @@
40#include <drm/ttm/ttm_module.h> 40#include <drm/ttm/ttm_module.h>
41#include "vmwgfx_fence.h" 41#include "vmwgfx_fence.h"
42 42
43#define VMWGFX_DRIVER_DATE "20140704" 43#define VMWGFX_DRIVER_DATE "20150810"
44#define VMWGFX_DRIVER_MAJOR 2 44#define VMWGFX_DRIVER_MAJOR 2
45#define VMWGFX_DRIVER_MINOR 6 45#define VMWGFX_DRIVER_MINOR 9
46#define VMWGFX_DRIVER_PATCHLEVEL 1 46#define VMWGFX_DRIVER_PATCHLEVEL 0
47#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 47#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
48#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 48#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
49#define VMWGFX_MAX_RELOCATIONS 2048 49#define VMWGFX_MAX_RELOCATIONS 2048
50#define VMWGFX_MAX_VALIDATIONS 2048 50#define VMWGFX_MAX_VALIDATIONS 2048
51#define VMWGFX_MAX_DISPLAYS 16 51#define VMWGFX_MAX_DISPLAYS 16
52#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 52#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
53#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0 53#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
54 54
55/* 55/*
56 * Perhaps we should have sysfs entries for these. 56 * Perhaps we should have sysfs entries for these.
@@ -59,6 +59,8 @@
59#define VMWGFX_NUM_GB_SHADER 20000 59#define VMWGFX_NUM_GB_SHADER 20000
60#define VMWGFX_NUM_GB_SURFACE 32768 60#define VMWGFX_NUM_GB_SURFACE 32768
61#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS 61#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
62#define VMWGFX_NUM_DXCONTEXT 256
63#define VMWGFX_NUM_DXQUERY 512
62#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ 64#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
63 VMWGFX_NUM_GB_SHADER +\ 65 VMWGFX_NUM_GB_SHADER +\
64 VMWGFX_NUM_GB_SURFACE +\ 66 VMWGFX_NUM_GB_SURFACE +\
@@ -85,6 +87,9 @@ struct vmw_fpriv {
85struct vmw_dma_buffer { 87struct vmw_dma_buffer {
86 struct ttm_buffer_object base; 88 struct ttm_buffer_object base;
87 struct list_head res_list; 89 struct list_head res_list;
90 s32 pin_count;
91 /* Not ref-counted. Protected by binding_mutex */
92 struct vmw_resource *dx_query_ctx;
88}; 93};
89 94
90/** 95/**
@@ -113,6 +118,7 @@ struct vmw_resource {
113 bool backup_dirty; /* Protected by backup buffer reserved */ 118 bool backup_dirty; /* Protected by backup buffer reserved */
114 struct vmw_dma_buffer *backup; 119 struct vmw_dma_buffer *backup;
115 unsigned long backup_offset; 120 unsigned long backup_offset;
121 unsigned long pin_count; /* Protected by resource reserved */
116 const struct vmw_res_func *func; 122 const struct vmw_res_func *func;
117 struct list_head lru_head; /* Protected by the resource lock */ 123 struct list_head lru_head; /* Protected by the resource lock */
118 struct list_head mob_head; /* Protected by @backup reserved */ 124 struct list_head mob_head; /* Protected by @backup reserved */
@@ -130,6 +136,9 @@ enum vmw_res_type {
130 vmw_res_surface, 136 vmw_res_surface,
131 vmw_res_stream, 137 vmw_res_stream,
132 vmw_res_shader, 138 vmw_res_shader,
139 vmw_res_dx_context,
140 vmw_res_cotable,
141 vmw_res_view,
133 vmw_res_max 142 vmw_res_max
134}; 143};
135 144
@@ -137,7 +146,8 @@ enum vmw_res_type {
137 * Resources that are managed using command streams. 146 * Resources that are managed using command streams.
138 */ 147 */
139enum vmw_cmdbuf_res_type { 148enum vmw_cmdbuf_res_type {
140 vmw_cmdbuf_res_compat_shader 149 vmw_cmdbuf_res_shader,
150 vmw_cmdbuf_res_view
141}; 151};
142 152
143struct vmw_cmdbuf_res_manager; 153struct vmw_cmdbuf_res_manager;
@@ -160,11 +170,13 @@ struct vmw_surface {
 	struct drm_vmw_size *sizes;
 	uint32_t num_sizes;
 	bool scanout;
+	uint32_t array_size;
 	/* TODO so far just a extra pointer */
 	struct vmw_cursor_snooper snooper;
 	struct vmw_surface_offset *offsets;
 	SVGA3dTextureFilter autogen_filter;
 	uint32_t multisample_count;
+	struct list_head view_list;
 };
 
 struct vmw_marker_queue {
@@ -176,14 +188,15 @@ struct vmw_marker_queue {
 
 struct vmw_fifo_state {
 	unsigned long reserved_size;
-	__le32 *dynamic_buffer;
-	__le32 *static_buffer;
+	u32 *dynamic_buffer;
+	u32 *static_buffer;
 	unsigned long static_buffer_size;
 	bool using_bounce_buffer;
 	uint32_t capabilities;
 	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
 	struct vmw_marker_queue marker_queue;
+	bool dx;
 };
 
 struct vmw_relocation {
@@ -264,70 +277,15 @@ struct vmw_piter {
 };
 
 /*
- * enum vmw_ctx_binding_type - abstract resource to context binding types
+ * enum vmw_display_unit_type - Describes the display unit
 */
-enum vmw_ctx_binding_type {
-	vmw_ctx_binding_shader,
-	vmw_ctx_binding_rt,
-	vmw_ctx_binding_tex,
-	vmw_ctx_binding_max
+enum vmw_display_unit_type {
+	vmw_du_invalid = 0,
+	vmw_du_legacy,
+	vmw_du_screen_object,
+	vmw_du_screen_target
 };
 
-/**
- * struct vmw_ctx_bindinfo - structure representing a single context binding
- *
- * @ctx: Pointer to the context structure. NULL means the binding is not
- * active.
- * @res: Non ref-counted pointer to the bound resource.
- * @bt: The binding type.
- * @i1: Union of information needed to unbind.
- */
-struct vmw_ctx_bindinfo {
-	struct vmw_resource *ctx;
-	struct vmw_resource *res;
-	enum vmw_ctx_binding_type bt;
-	bool scrubbed;
-	union {
-		SVGA3dShaderType shader_type;
-		SVGA3dRenderTargetType rt_type;
-		uint32 texture_stage;
-	} i1;
-};
-
-/**
- * struct vmw_ctx_binding - structure representing a single context binding
- * - suitable for tracking in a context
- *
- * @ctx_list: List head for context.
- * @res_list: List head for bound resource.
- * @bi: Binding info
- */
-struct vmw_ctx_binding {
-	struct list_head ctx_list;
-	struct list_head res_list;
-	struct vmw_ctx_bindinfo bi;
-};
-
-
-/**
- * struct vmw_ctx_binding_state - context binding state
- *
- * @list: linked list of individual bindings.
- * @render_targets: Render target bindings.
- * @texture_units: Texture units/samplers bindings.
- * @shaders: Shader bindings.
- *
- * Note that this structure also provides storage space for the individual
- * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
- * for individual bindings.
- *
- */
-struct vmw_ctx_binding_state {
-	struct list_head list;
-	struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
-	struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
-	struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
-};
 
 struct vmw_sw_context{
 	struct drm_open_hash res_ht;
@@ -342,15 +300,21 @@ struct vmw_sw_context{
 	uint32_t *cmd_bounce;
 	uint32_t cmd_bounce_size;
 	struct list_head resource_list;
-	struct ttm_buffer_object *cur_query_bo;
+	struct list_head ctx_resource_list; /* For contexts and cotables */
+	struct vmw_dma_buffer *cur_query_bo;
 	struct list_head res_relocations;
 	uint32_t *buf_start;
 	struct vmw_res_cache_entry res_cache[vmw_res_max];
 	struct vmw_resource *last_query_ctx;
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
-	struct vmw_ctx_binding_state staged_bindings;
+	struct vmw_ctx_binding_state *staged_bindings;
+	bool staged_bindings_inuse;
 	struct list_head staged_cmd_res;
+	struct vmw_resource_val_node *dx_ctx_node;
+	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_resource *dx_query_ctx;
+	struct vmw_cmdbuf_res_manager *man;
 };
 
 struct vmw_legacy_display;
@@ -358,8 +322,6 @@ struct vmw_overlay;
 
 struct vmw_master {
 	struct ttm_lock lock;
-	struct mutex fb_surf_mutex;
-	struct list_head fb_surf;
 };
 
 struct vmw_vga_topology_state {
@@ -370,6 +332,26 @@ struct vmw_vga_topology_state {
 	uint32_t pos_y;
 };
 
+
+/*
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size: Size of the table (page-aligned).
+ * @page_table: Pointer to a struct vmw_mob holding the page table.
+ */
+struct vmw_otable {
+	unsigned long size;
+	struct vmw_mob *page_table;
+	bool enabled;
+};
+
+struct vmw_otable_batch {
+	unsigned num_otables;
+	struct vmw_otable *otables;
+	struct vmw_resource *context;
+	struct ttm_buffer_object *otable_bo;
+};
+
 struct vmw_private {
 	struct ttm_bo_device bdev;
 	struct ttm_bo_global_ref bo_global_ref;
@@ -387,9 +369,13 @@ struct vmw_private {
 	uint32_t mmio_size;
 	uint32_t fb_max_width;
 	uint32_t fb_max_height;
+	uint32_t texture_max_width;
+	uint32_t texture_max_height;
+	uint32_t stdu_max_width;
+	uint32_t stdu_max_height;
 	uint32_t initial_width;
 	uint32_t initial_height;
-	__le32 __iomem *mmio_virt;
+	u32 __iomem *mmio_virt;
 	int mmio_mtrr;
 	uint32_t capabilities;
 	uint32_t max_gmr_ids;
@@ -401,6 +387,7 @@ struct vmw_private {
 	bool has_mob;
 	spinlock_t hw_lock;
 	spinlock_t cap_lock;
+	bool has_dx;
 
 	/*
 	 * VGA registers.
@@ -420,6 +407,7 @@ struct vmw_private {
 	 */
 
 	void *fb_info;
+	enum vmw_display_unit_type active_display_unit;
 	struct vmw_legacy_display *ldu_priv;
 	struct vmw_screen_object_display *sou_priv;
 	struct vmw_overlay *overlay_priv;
@@ -453,6 +441,8 @@ struct vmw_private {
 	spinlock_t waiter_lock;
 	int fence_queue_waiters; /* Protected by waiter_lock */
 	int goal_queue_waiters; /* Protected by waiter_lock */
+	int cmdbuf_waiters; /* Protected by irq_lock */
+	int error_waiters; /* Protected by irq_lock */
 	atomic_t fifo_queue_waiters;
 	uint32_t last_read_seqno;
 	spinlock_t irq_lock;
@@ -484,6 +474,7 @@ struct vmw_private {
 
 	bool stealth;
 	bool enable_fb;
+	spinlock_t svga_lock;
 
 	/**
 	 * Master management.
@@ -493,9 +484,10 @@ struct vmw_private {
 	struct vmw_master fbdev_master;
 	struct notifier_block pm_nb;
 	bool suspended;
+	bool refuse_hibernation;
 
 	struct mutex release_mutex;
-	uint32_t num_3d_resources;
+	atomic_t num_fifo_resources;
 
 	/*
 	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
@@ -507,8 +499,8 @@ struct vmw_private {
 	 * are protected by the cmdbuf mutex.
 	 */
 
-	struct ttm_buffer_object *dummy_query_bo;
-	struct ttm_buffer_object *pinned_bo;
+	struct vmw_dma_buffer *dummy_query_bo;
+	struct vmw_dma_buffer *pinned_bo;
 	uint32_t query_cid;
 	uint32_t query_cid_valid;
 	bool dummy_query_bo_pinned;
@@ -531,8 +523,9 @@ struct vmw_private {
 	/*
 	 * Guest Backed stuff
 	 */
-	struct ttm_buffer_object *otable_bo;
-	struct vmw_otable *otables;
+	struct vmw_otable_batch otable_batch;
+
+	struct vmw_cmdbuf_man *cman;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -587,8 +580,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
 	return val;
 }
 
-int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
-void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
+extern void vmw_svga_enable(struct vmw_private *dev_priv);
+extern void vmw_svga_disable(struct vmw_private *dev_priv);
+
 
 /**
  * GMR utilities - vmwgfx_gmr.c
@@ -610,7 +604,8 @@ extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
 extern struct vmw_resource *
 vmw_resource_reference_unless_doomed(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
-extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+				bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 				  struct ttm_object_file *tfile,
@@ -660,10 +655,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 				  uint32_t *inout_id,
 				  struct vmw_resource **out);
 extern void vmw_resource_unreserve(struct vmw_resource *res,
+				   bool switch_backup,
 				   struct vmw_dma_buffer *new_backup,
 				   unsigned long new_backup_offset);
 extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem);
+extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
+				  struct ttm_mem_reg *mem);
+extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
 extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 				struct vmw_fence_obj *fence);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
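A note on the vmw_resource_reserve() change in the hunk above: the new interruptible argument lets reservation back off when a signal arrives instead of blocking. A minimal caller sketch, assuming only the declaration shown here (the helper name is hypothetical, not part of the patch):

	/* Hypothetical caller: reserve interruptibly and pass -ERESTARTSYS
	 * up so the ioctl path can restart the syscall. */
	static int example_reserve_interruptible(struct vmw_resource *res)
	{
		/* true = interruptible, false = a backup buffer may be
		 * allocated by the reservation code if needed. */
		int ret = vmw_resource_reserve(res, true, false);

		/* -ERESTARTSYS here means a signal interrupted the wait. */
		return ret;
	}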
@@ -671,25 +670,25 @@ extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 /**
  * DMA buffer helper routines - vmwgfx_dmabuf.c
  */
-extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
-				   struct vmw_dma_buffer *bo,
-				   struct ttm_placement *placement,
-				   bool interruptible);
-extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
-			      struct vmw_dma_buffer *buf,
-			      bool pin, bool interruptible);
-extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
-				     struct vmw_dma_buffer *buf,
-				     bool pin, bool interruptible);
-extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
-				       struct vmw_dma_buffer *bo,
-				       bool pin, bool interruptible);
+extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
+				       struct vmw_dma_buffer *bo,
+				       struct ttm_placement *placement,
+				       bool interruptible);
+extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible);
+extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+					 struct vmw_dma_buffer *buf,
+					 bool interruptible);
+extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+					   struct vmw_dma_buffer *bo,
+					   bool interruptible);
 extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
 			    struct vmw_dma_buffer *bo,
 			    bool interruptible);
 extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
 				 SVGAGuestPtr *ptr);
-extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
+extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
 
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -717,7 +716,10 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv,
 extern void vmw_fifo_release(struct vmw_private *dev_priv,
 			     struct vmw_fifo_state *fifo);
 extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
+extern void *
+vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
 extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
 extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
 			       uint32_t *seqno);
 extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
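Worth flagging in the dmabuf hunk above: the old to_placement/to_vram helpers carried a bool pin flag, while the replacements bake pinning into the name and drop the flag, so call sites read unambiguously. A before/after sketch (illustrative only, not taken from the patch):

	/* Before this series the second 'true' was the pin flag:
	 *	ret = vmw_dmabuf_to_vram(dev_priv, buf, true, true);
	 * After, pinning is implied by the function name: */
	static int example_pin_in_vram(struct vmw_private *dev_priv,
				       struct vmw_dma_buffer *buf)
	{
		return vmw_dmabuf_pin_in_vram(dev_priv, buf,
					      true /* interruptible */);
	}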
@@ -726,6 +728,8 @@ extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
 extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
 				     uint32_t cid);
+extern int vmw_fifo_flush(struct vmw_private *dev_priv,
+			  bool interruptible);
 
 /**
  * TTM glue - vmwgfx_ttm_glue.c
@@ -750,6 +754,7 @@ extern struct ttm_placement vmw_sys_ne_placement;
 extern struct ttm_placement vmw_evictable_placement;
 extern struct ttm_placement vmw_srf_placement;
 extern struct ttm_placement vmw_mob_placement;
+extern struct ttm_placement vmw_mob_ne_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
 extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
@@ -800,14 +805,15 @@ static inline struct page *vmw_piter_page(struct vmw_piter *viter)
  * Command submission - vmwgfx_execbuf.c
  */
 
-extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv);
+extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+			     struct drm_file *file_priv, size_t size);
 extern int vmw_execbuf_process(struct drm_file *file_priv,
 			       struct vmw_private *dev_priv,
 			       void __user *user_commands,
 			       void *kernel_commands,
 			       uint32_t command_size,
 			       uint64_t throttle_us,
+			       uint32_t dx_context_handle,
 			       struct drm_vmw_fence_rep __user
 			       *user_fence_rep,
 			       struct vmw_fence_obj **out_fence);
@@ -826,6 +832,11 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 					*user_fence_rep,
 					struct vmw_fence_obj *fence,
 					uint32_t fence_handle);
+extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+				      struct ttm_buffer_object *bo,
+				      bool interruptible,
+				      bool validate_as_mob);
+
 
 /**
  * IRQs and wating - vmwgfx_irq.c
@@ -833,8 +844,8 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 
 extern irqreturn_t vmw_irq_handler(int irq, void *arg);
 extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
-			  uint32_t seqno, bool interruptible,
-			  unsigned long timeout);
+			  uint32_t seqno, bool interruptible,
+			  unsigned long timeout);
 extern void vmw_irq_preinstall(struct drm_device *dev);
 extern int vmw_irq_postinstall(struct drm_device *dev);
 extern void vmw_irq_uninstall(struct drm_device *dev);
@@ -852,6 +863,10 @@ extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
 extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
 extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
 extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
+				   int *waiter_count);
+extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+				      u32 flag, int *waiter_count);
 
 /**
  * Rudimentary fence-like objects currently used only for throttling -
@@ -861,9 +876,9 @@ extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
 extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
 extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
 extern int vmw_marker_push(struct vmw_marker_queue *queue,
-			   uint32_t seqno);
+			   uint32_t seqno);
 extern int vmw_marker_pull(struct vmw_marker_queue *queue,
-			   uint32_t signaled_seqno);
+			   uint32_t signaled_seqno);
 extern int vmw_wait_lag(struct vmw_private *dev_priv,
 			struct vmw_marker_queue *queue, uint32_t us);
 
@@ -908,12 +923,6 @@ int vmw_kms_present(struct vmw_private *dev_priv,
 		    uint32_t sid, int32_t destX, int32_t destY,
 		    struct drm_vmw_rect *clips,
 		    uint32_t num_clips);
-int vmw_kms_readback(struct vmw_private *dev_priv,
-		     struct drm_file *file_priv,
-		     struct vmw_framebuffer *vfb,
-		     struct drm_vmw_fence_rep __user *user_fence_rep,
-		     struct drm_vmw_rect *clips,
-		     uint32_t num_clips);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 
@@ -927,6 +936,10 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
 int vmw_dumb_destroy(struct drm_file *file_priv,
 		     struct drm_device *dev,
 		     uint32_t handle);
+extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
+extern void vmw_resource_unpin(struct vmw_resource *res);
+extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
+
 /**
  * Overlay control - vmwgfx_overlay.c
 */
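The two vmw_generic_waiter_* declarations above generalize the seqno/goal waiter pattern: the caller passes the IRQ flag to enable plus a per-purpose counter, so only the first add and the last remove have to touch the hardware interrupt mask. A usage sketch, assuming SVGA_IRQFLAG_COMMAND_BUFFER is the flag the new command buffer code passes (inferred from the naming, not shown in this hunk):

	/* Sketch only: bracket a wait with the generic waiter helpers. */
	static void example_cmdbuf_irq_window(struct vmw_private *dev_priv)
	{
		vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				       &dev_priv->cmdbuf_waiters);
		/* ... sleep until the command buffer IRQ fires ... */
		vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
					  &dev_priv->cmdbuf_waiters);
	}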
@@ -982,27 +995,33 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv);
 
 extern const struct vmw_user_resource_conv *user_context_converter;
 
-extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
-
 extern int vmw_context_check(struct vmw_private *dev_priv,
 			     struct ttm_object_file *tfile,
 			     int id,
 			     struct vmw_resource **p_res);
 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
+extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+					     struct drm_file *file_priv);
 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
-extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
-				   const struct vmw_ctx_bindinfo *ci);
-extern void
-vmw_context_binding_state_transfer(struct vmw_resource *res,
-				   struct vmw_ctx_binding_state *cbs);
-extern void vmw_context_binding_res_list_kill(struct list_head *head);
-extern void vmw_context_binding_res_list_scrub(struct list_head *head);
-extern int vmw_context_rebind_all(struct vmw_resource *ctx);
 extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 extern struct vmw_cmdbuf_res_manager *
 vmw_context_res_man(struct vmw_resource *ctx);
+extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+						SVGACOTableType cotable_type);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
+struct vmw_ctx_binding_state;
+extern struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx);
+extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+					  bool readback);
+extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+				     struct vmw_dma_buffer *mob);
+extern struct vmw_dma_buffer *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
+
+
 /*
  * Surface management - vmwgfx_surface.c
 */
@@ -1025,6 +1044,16 @@ extern int vmw_surface_check(struct vmw_private *dev_priv,
 			     uint32_t handle, int *id);
 extern int vmw_surface_validate(struct vmw_private *dev_priv,
 				struct vmw_surface *srf);
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+			       uint32_t user_accounting_size,
+			       uint32_t svga3d_flags,
+			       SVGA3dSurfaceFormat format,
+			       bool for_scanout,
+			       uint32_t num_mip_levels,
+			       uint32_t multisample_count,
+			       uint32_t array_size,
+			       struct drm_vmw_size size,
+			       struct vmw_surface **srf_out);
 
 /*
  * Shader management - vmwgfx_shader.c
@@ -1042,12 +1071,21 @@ extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
 				 SVGA3dShaderType shader_type,
 				 size_t size,
 				 struct list_head *list);
-extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
-				    u32 user_key, SVGA3dShaderType shader_type,
-				    struct list_head *list);
+extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+			     u32 user_key, SVGA3dShaderType shader_type,
+			     struct list_head *list);
+extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+			     struct vmw_resource *ctx,
+			     u32 user_key,
+			     SVGA3dShaderType shader_type,
+			     struct list_head *list);
+extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+					     struct list_head *list,
+					     bool readback);
+
 extern struct vmw_resource *
-vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
-			 u32 user_key, SVGA3dShaderType shader_type);
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+		  u32 user_key, SVGA3dShaderType shader_type);
 
 /*
  * Command buffer managed resources - vmwgfx_cmdbuf_res.c
@@ -1071,7 +1109,48 @@ extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
 extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
 				 enum vmw_cmdbuf_res_type res_type,
 				 u32 user_key,
-				 struct list_head *list);
+				 struct list_head *list,
+				 struct vmw_resource **res);
+
+/*
+ * COTable management - vmwgfx_cotable.c
+ */
+extern const SVGACOTableType vmw_cotable_scrub_order[];
+extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+					      struct vmw_resource *ctx,
+					      u32 type);
+extern int vmw_cotable_notify(struct vmw_resource *res, int id);
+extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
+extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
+				     struct list_head *head);
+
+/*
+ * Command buffer managerment vmwgfx_cmdbuf.c
+ */
+struct vmw_cmdbuf_man;
+struct vmw_cmdbuf_header;
+
+extern struct vmw_cmdbuf_man *
+vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
+extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
+				    size_t size, size_t default_size);
+extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
+extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
+extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
+			   unsigned long timeout);
+extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
+				int ctx_id, bool interruptible,
+				struct vmw_cmdbuf_header *header);
+extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
+			      struct vmw_cmdbuf_header *header,
+			      bool flush);
+extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
+extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
+			      size_t size, bool interruptible,
+			      struct vmw_cmdbuf_header **p_header);
+extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
+extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
+				bool interruptible);
 
 
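The block above is the entire public surface of the new command buffer manager. A lifecycle sketch pieced together from these declarations alone; the pool sizes are illustrative and the error conventions (ERR_PTR vs NULL) are assumed rather than verified, so treat this as a shape, not a reference:

	static int example_cmdbuf_lifecycle(struct vmw_private *dev_priv)
	{
		struct vmw_cmdbuf_man *man;
		struct vmw_cmdbuf_header *header;
		void *cmd;
		int ret;

		man = vmw_cmdbuf_man_create(dev_priv);
		if (IS_ERR(man))
			return PTR_ERR(man);

		ret = vmw_cmdbuf_set_pool_size(man, 2 * 1024 * 1024, 256 * 1024);
		if (ret)
			goto out_destroy;

		cmd = vmw_cmdbuf_alloc(man, 128, true, &header);
		if (IS_ERR_OR_NULL(cmd)) {		/* convention assumed */
			ret = cmd ? PTR_ERR(cmd) : -ENOMEM;
			goto out_pool;
		}
		/* ... write SVGA commands into 'cmd' ... */
		vmw_cmdbuf_commit(man, 128, header, true);	/* true = flush */
		ret = vmw_cmdbuf_idle(man, true, HZ);		/* drain, 1s cap */
	out_pool:
		vmw_cmdbuf_remove_pool(man);
	out_destroy:
		vmw_cmdbuf_man_destroy(man);
		return ret;
	}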
 /**
@@ -1116,4 +1195,14 @@ static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
 {
 	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
 }
+
+static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
+{
+	atomic_inc(&dev_priv->num_fifo_resources);
+}
+
+static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
+{
+	atomic_dec(&dev_priv->num_fifo_resources);
+}
 #endif
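The inline helpers above replace the old uint32_t num_3d_resources, which needed release_mutex around every bump, with a lock-free atomic. A sketch of the first-user pattern the atomic enables (illustrative only; the real enable/disable policy lives in vmw_svga_enable()/vmw_svga_disable() and their callers):

	/* Illustrative only: enable SVGA when the count goes 0 -> 1. */
	static void example_first_fifo_user(struct vmw_private *dev_priv)
	{
		if (atomic_inc_return(&dev_priv->num_fifo_resources) == 1)
			vmw_svga_enable(dev_priv);
	}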
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 654c8daeb5ab..b56565457c96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,6 +29,8 @@
 #include "vmwgfx_reg.h"
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
 
 #define VMW_RES_HT_ORDER 12
 
@@ -59,8 +61,11 @@ struct vmw_resource_relocation {
  * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
  * @first_usage: Set to true the first time the resource is referenced in
  * the command stream.
- * @no_buffer_needed: Resources do not need to allocate buffer backup on
- * reservation. The command stream will provide one.
+ * @switching_backup: The command stream provides a new backup buffer for a
+ * resource.
+ * @no_buffer_needed: This means @switching_backup is true on first buffer
+ * reference. So resource reservation does not need to allocate a backup
+ * buffer for the resource.
 */
 struct vmw_resource_val_node {
 	struct list_head head;
@@ -69,8 +74,9 @@ struct vmw_resource_val_node {
 	struct vmw_dma_buffer *new_backup;
 	struct vmw_ctx_binding_state *staged_bindings;
 	unsigned long new_backup_offset;
-	bool first_usage;
-	bool no_buffer_needed;
+	u32 first_usage : 1;
+	u32 switching_backup : 1;
+	u32 no_buffer_needed : 1;
 };
 
 /**
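The three bools above collapse into single-bit u32 bitfields, so the flags pack into one word. Note the usual bitfield caveat that concurrent writers would race on the shared word; here the node is owned by one submission. A reduced usage sketch (illustrative):

	/* Set/test reads the same as with bools; assignment truncates to
	 * the 1-bit field width. */
	static void example_flags(struct vmw_resource_val_node *node)
	{
		node->first_usage = 1;
		if (node->first_usage && !node->no_buffer_needed)
			node->switching_backup = 0;
	}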
@@ -92,22 +98,40 @@ struct vmw_cmd_entry {
 	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
 				       (_gb_disable), (_gb_enable)}
 
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					struct vmw_resource *ctx);
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGAMobId *id,
+				 struct vmw_dma_buffer **vmw_bo_p);
+static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
+				   struct vmw_dma_buffer *vbo,
+				   bool validate_as_mob,
+				   uint32_t *p_val_node);
+
+
 /**
- * vmw_resource_unreserve - unreserve resources previously reserved for
+ * vmw_resources_unreserve - unreserve resources previously reserved for
  * command submission.
  *
- * @list_head: list of resources to unreserve.
+ * @sw_context: pointer to the software context
  * @backoff: Whether command submission failed.
 */
-static void vmw_resource_list_unreserve(struct list_head *list,
-					bool backoff)
+static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
+				    bool backoff)
 {
 	struct vmw_resource_val_node *val;
+	struct list_head *list = &sw_context->resource_list;
+
+	if (sw_context->dx_query_mob && !backoff)
+		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+					  sw_context->dx_query_mob);
 
 	list_for_each_entry(val, list, head) {
 		struct vmw_resource *res = val->res;
-		struct vmw_dma_buffer *new_backup =
-			backoff ? NULL : val->new_backup;
+		bool switch_backup =
+			(backoff) ? false : val->switching_backup;
 
 		/*
 		 * Transfer staged context bindings to the
@@ -115,18 +139,71 @@ static void vmw_resource_list_unreserve(struct list_head *list,
 		 */
 		if (unlikely(val->staged_bindings)) {
 			if (!backoff) {
-				vmw_context_binding_state_transfer
-					(val->res, val->staged_bindings);
+				vmw_binding_state_commit
+					(vmw_context_binding_state(val->res),
+					 val->staged_bindings);
 			}
-			kfree(val->staged_bindings);
+
+			if (val->staged_bindings != sw_context->staged_bindings)
+				vmw_binding_state_free(val->staged_bindings);
+			else
+				sw_context->staged_bindings_inuse = false;
 			val->staged_bindings = NULL;
 		}
-		vmw_resource_unreserve(res, new_backup,
-				       val->new_backup_offset);
+		vmw_resource_unreserve(res, switch_backup, val->new_backup,
+				       val->new_backup_offset);
 		vmw_dmabuf_unreference(&val->new_backup);
 	}
 }
 
+/**
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
+ * added to the validate list.
+ *
+ * @dev_priv: Pointer to the device private:
+ * @sw_context: The validation context:
+ * @node: The validation node holding this context.
+ */
+static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   struct vmw_resource_val_node *node)
+{
+	int ret;
+
+	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	if (!sw_context->staged_bindings) {
+		sw_context->staged_bindings =
+			vmw_binding_state_alloc(dev_priv);
+		if (IS_ERR(sw_context->staged_bindings)) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			ret = PTR_ERR(sw_context->staged_bindings);
+			sw_context->staged_bindings = NULL;
+			goto out_err;
+		}
+	}
+
+	if (sw_context->staged_bindings_inuse) {
+		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
+		if (IS_ERR(node->staged_bindings)) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			ret = PTR_ERR(node->staged_bindings);
+			node->staged_bindings = NULL;
+			goto out_err;
+		}
+	} else {
+		node->staged_bindings = sw_context->staged_bindings;
+		sw_context->staged_bindings_inuse = true;
+	}
+
+	return 0;
+out_err:
+	return ret;
+}
 
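vmw_cmd_ctx_first_setup() above implements a borrow-or-allocate scheme: the software context preallocates one vmw_ctx_binding_state, the first context in a submission borrows it, and only additional contexts pay for an allocation; vmw_resources_unreserve() frees the extras and clears the in-use flag. Reduced to its shape (error handling trimmed, condensed directly from the function above):

	if (!sw_context->staged_bindings_inuse) {
		node->staged_bindings = sw_context->staged_bindings; /* borrow */
		sw_context->staged_bindings_inuse = true;
	} else {
		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
		/* ... IS_ERR() check as in the full function ... */
	}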
 /**
  * vmw_resource_val_add - Add a resource to the software context's
@@ -141,6 +218,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
 				struct vmw_resource *res,
 				struct vmw_resource_val_node **p_node)
 {
+	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_resource_val_node *node;
 	struct drm_hash_item *hash;
 	int ret;
@@ -169,14 +247,90 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
 		kfree(node);
 		return ret;
 	}
-	list_add_tail(&node->head, &sw_context->resource_list);
 	node->res = vmw_resource_reference(res);
 	node->first_usage = true;
-
 	if (unlikely(p_node != NULL))
 		*p_node = node;
 
-	return 0;
+	if (!dev_priv->has_mob) {
+		list_add_tail(&node->head, &sw_context->resource_list);
+		return 0;
+	}
+
+	switch (vmw_res_type(res)) {
+	case vmw_res_context:
+	case vmw_res_dx_context:
+		list_add(&node->head, &sw_context->ctx_resource_list);
+		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
+		break;
+	case vmw_res_cotable:
+		list_add_tail(&node->head, &sw_context->ctx_resource_list);
+		break;
+	default:
+		list_add_tail(&node->head, &sw_context->resource_list);
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * vmw_view_res_val_add - Add a view and the surface it's pointing to
+ * to the validation list
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view: Pointer to the view resource.
+ *
+ * Returns 0 if success, negative error code otherwise.
+ */
+static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
+				struct vmw_resource *view)
+{
+	int ret;
+
+	/*
+	 * First add the resource the view is pointing to, otherwise
+	 * it may be swapped out when the view is validated.
+	 */
+	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+	if (ret)
+		return ret;
+
+	return vmw_resource_val_add(sw_context, view, NULL);
+}
+
+/**
+ * vmw_view_id_val_add - Look up a view and add it and the surface it's
+ * pointing to to the validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view_type: The view type to look up.
+ * @id: view id of the view.
+ *
+ * The view is represented by a view id and the DX context it's created on,
+ * or scheduled for creation on. If there is no DX context set, the function
+ * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
+ */
+static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+			       enum vmw_view_type view_type, u32 id)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *view;
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	view = vmw_view_lookup(sw_context->man, view_type, id);
+	if (IS_ERR(view))
+		return PTR_ERR(view);
+
+	ret = vmw_view_res_val_add(sw_context, view);
+	vmw_resource_unreference(&view);
+
+	return ret;
 }
 
 /**
@@ -195,24 +349,56 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 					struct vmw_resource *ctx)
 {
 	struct list_head *binding_list;
-	struct vmw_ctx_binding *entry;
+	struct vmw_ctx_bindinfo *entry;
 	int ret = 0;
 	struct vmw_resource *res;
+	u32 i;
+
+	/* Add all cotables to the validation list. */
+	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+			res = vmw_context_cotable(ctx, i);
+			if (IS_ERR(res))
+				continue;
 
+			ret = vmw_resource_val_add(sw_context, res, NULL);
+			vmw_resource_unreference(&res);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+	}
+
+
+	/* Add all resources bound to the context to the validation list */
 	mutex_lock(&dev_priv->binding_mutex);
 	binding_list = vmw_context_binding_list(ctx);
 
 	list_for_each_entry(entry, binding_list, ctx_list) {
-		res = vmw_resource_reference_unless_doomed(entry->bi.res);
+		/* entry->res is not refcounted */
+		res = vmw_resource_reference_unless_doomed(entry->res);
 		if (unlikely(res == NULL))
 			continue;
 
-		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+		if (vmw_res_type(entry->res) == vmw_res_view)
+			ret = vmw_view_res_val_add(sw_context, entry->res);
+		else
+			ret = vmw_resource_val_add(sw_context, entry->res,
+						   NULL);
 		vmw_resource_unreference(&res);
 		if (unlikely(ret != 0))
 			break;
 	}
 
+	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+		struct vmw_dma_buffer *dx_query_mob;
+
+		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
+		if (dx_query_mob)
+			ret = vmw_bo_to_validate_list(sw_context,
+						      dx_query_mob,
+						      true, NULL);
+	}
+
 	mutex_unlock(&dev_priv->binding_mutex);
 	return ret;
 }
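Note how the rewritten vmw_resource_context_res_add() fans out by resource kind via vmw_res_type(), declared in the header hunk earlier. The same classification in isolation, as a tiny sketch (the helper name is hypothetical, not part of the patch):

	/* Hypothetical helper: classify context-like resources the way the
	 * validation code above does. */
	static bool example_is_context(const struct vmw_resource *res)
	{
		enum vmw_res_type type = vmw_res_type(res);

		return type == vmw_res_context || type == vmw_res_dx_context;
	}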
@@ -308,7 +494,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
  * submission is reached.
 */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct ttm_buffer_object *bo,
+				   struct vmw_dma_buffer *vbo,
 				   bool validate_as_mob,
 				   uint32_t *p_val_node)
 {
@@ -318,7 +504,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 	struct drm_hash_item *hash;
 	int ret;
 
-	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
 				    &hash) == 0)) {
 		vval_buf = container_of(hash, struct vmw_validate_buffer,
 					hash);
@@ -336,7 +522,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 			return -EINVAL;
 		}
 		vval_buf = &sw_context->val_bufs[val_node];
-		vval_buf->hash.key = (unsigned long) bo;
+		vval_buf->hash.key = (unsigned long) vbo;
 		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed to initialize a buffer validation "
@@ -345,7 +531,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 		}
 		++sw_context->cur_val_buf;
 		val_buf = &vval_buf->base;
-		val_buf->bo = ttm_bo_reference(bo);
+		val_buf->bo = ttm_bo_reference(&vbo->base);
 		val_buf->shared = false;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		vval_buf->validate_as_mob = validate_as_mob;
@@ -370,27 +556,39 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 {
 	struct vmw_resource_val_node *val;
-	int ret;
+	int ret = 0;
 
 	list_for_each_entry(val, &sw_context->resource_list, head) {
 		struct vmw_resource *res = val->res;
 
-		ret = vmw_resource_reserve(res, val->no_buffer_needed);
+		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
 		if (unlikely(ret != 0))
 			return ret;
 
 		if (res->backup) {
-			struct ttm_buffer_object *bo = &res->backup->base;
+			struct vmw_dma_buffer *vbo = res->backup;
 
 			ret = vmw_bo_to_validate_list
-				(sw_context, bo,
+				(sw_context, vbo,
 				 vmw_resource_needs_backup(res), NULL);
 
 			if (unlikely(ret != 0))
 				return ret;
 		}
 	}
-	return 0;
+
+	if (sw_context->dx_query_mob) {
+		struct vmw_dma_buffer *expected_dx_query_mob;
+
+		expected_dx_query_mob =
+			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
+		if (expected_dx_query_mob &&
+		    expected_dx_query_mob != sw_context->dx_query_mob) {
+			ret = -EINVAL;
+		}
+	}
+
+	return ret;
 }
 
 /**
@@ -409,6 +607,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 
 	list_for_each_entry(val, &sw_context->resource_list, head) {
 		struct vmw_resource *res = val->res;
+		struct vmw_dma_buffer *backup = res->backup;
 
 		ret = vmw_resource_validate(res);
 		if (unlikely(ret != 0)) {
@@ -416,18 +615,29 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 			DRM_ERROR("Failed to validate resource.\n");
 			return ret;
 		}
+
+		/* Check if the resource switched backup buffer */
+		if (backup && res->backup && (backup != res->backup)) {
+			struct vmw_dma_buffer *vbo = res->backup;
+
+			ret = vmw_bo_to_validate_list
+				(sw_context, vbo,
+				 vmw_resource_needs_backup(res), NULL);
+			if (ret) {
+				ttm_bo_unreserve(&vbo->base);
+				return ret;
+			}
+		}
 	}
 	return 0;
 }
 
-
 /**
  * vmw_cmd_res_reloc_add - Add a resource to a software context's
  * relocation- and validation lists.
  *
  * @dev_priv: Pointer to a struct vmw_private identifying the device.
  * @sw_context: Pointer to the software context.
- * @res_type: Resource type.
  * @id_loc: Pointer to where the id that needs translation is located.
  * @res: Valid pointer to a struct vmw_resource.
  * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
@@ -435,7 +645,6 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 */
 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
-				 enum vmw_res_type res_type,
 				 uint32_t *id_loc,
 				 struct vmw_resource *res,
 				 struct vmw_resource_val_node **p_val)
@@ -454,29 +663,6 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (res_type == vmw_res_context && dev_priv->has_mob &&
-	    node->first_usage) {
-
-		/*
-		 * Put contexts first on the list to be able to exit
-		 * list traversal for contexts early.
-		 */
-		list_del(&node->head);
-		list_add(&node->head, &sw_context->resource_list);
-
-		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
-		if (unlikely(ret != 0))
-			return ret;
-		node->staged_bindings =
-			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
-		if (node->staged_bindings == NULL) {
-			DRM_ERROR("Failed to allocate context binding "
-				  "information.\n");
-			return -ENOMEM;
-		}
-		INIT_LIST_HEAD(&node->staged_bindings->list);
-	}
-
 	if (p_val)
 		*p_val = node;
 
@@ -554,7 +740,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 	rcache->res = res;
 	rcache->handle = *id_loc;
 
-	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
 				    res, &node);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
@@ -573,6 +759,46 @@ out_no_reloc:
 }
 
 /**
+ * vmw_rebind_dx_query - Rebind DX query associated with the context
+ *
+ * @ctx_res: context the query belongs to
+ *
+ * This function assumes binding_mutex is held.
+ */
+static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
+{
+	struct vmw_private *dev_priv = ctx_res->dev_priv;
+	struct vmw_dma_buffer *dx_query_mob;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindAllQuery body;
+	} *cmd;
+
+
+	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
+
+	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
+		return 0;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
+
+	if (cmd == NULL) {
+		DRM_ERROR("Failed to rebind queries.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = ctx_res->id;
+	cmd->body.mobid = dx_query_mob->base.mem.start;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
+
+	return 0;
+}
+
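vmw_rebind_all_dx_query() above is a textbook instance of the driver's FIFO emission pattern: reserve header plus body as one struct, fill it in, commit exactly the reserved size. The shape, extracted for readers new to the driver (condensed from the function above, body fields elided):

	/* Generic shape of FIFO command emission used throughout vmwgfx. */
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindAllQuery body;
	} *cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

	if (cmd != NULL) {
		cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
		cmd->header.size = sizeof(cmd->body);
		/* ... fill cmd->body ... */
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}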
+/**
  * vmw_rebind_contexts - Rebind all resources previously bound to
  * referenced contexts.
  *
@@ -589,12 +815,80 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
 		if (unlikely(!val->staged_bindings))
 			break;
 
-		ret = vmw_context_rebind_all(val->res);
+		ret = vmw_binding_rebind_all
+			(vmw_context_binding_state(val->res));
 		if (unlikely(ret != 0)) {
 			if (ret != -ERESTARTSYS)
 				DRM_ERROR("Failed to rebind context.\n");
 			return ret;
 		}
+
+		ret = vmw_rebind_all_dx_query(val->res);
+		if (ret != 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_view_bindings_add - Add an array of view bindings to a context
+ * binding state tracker.
+ *
+ * @sw_context: The execbuf state used for this command.
+ * @view_type: View type for the bindings.
+ * @binding_type: Binding type for the bindings.
+ * @shader_slot: The shader slot to user for the bindings.
+ * @view_ids: Array of view ids to be bound.
+ * @num_views: Number of view ids in @view_ids.
+ * @first_slot: The binding slot to be used for the first view id in @view_ids.
+ */
+static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
+				 enum vmw_view_type view_type,
+				 enum vmw_ctx_binding_type binding_type,
+				 uint32 shader_slot,
+				 uint32 view_ids[], u32 num_views,
+				 u32 first_slot)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_cmdbuf_res_manager *man;
+	u32 i;
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	man = sw_context->man;
+	for (i = 0; i < num_views; ++i) {
+		struct vmw_ctx_bindinfo_view binding;
+		struct vmw_resource *view = NULL;
+
+		if (view_ids[i] != SVGA3D_INVALID_ID) {
+			view = vmw_view_lookup(man, view_type, view_ids[i]);
+			if (IS_ERR(view)) {
+				DRM_ERROR("View not found.\n");
+				return PTR_ERR(view);
+			}
+
+			ret = vmw_view_res_val_add(sw_context, view);
+			if (ret) {
+				DRM_ERROR("Could not add view to "
+					  "validation list.\n");
+				vmw_resource_unreference(&view);
+				return ret;
+			}
+		}
+		binding.bi.ctx = ctx_node->res;
+		binding.bi.res = view;
+		binding.bi.bt = binding_type;
+		binding.shader_slot = shader_slot;
+		binding.slot = first_slot + i;
+		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+				shader_slot, binding.slot);
+		if (view)
+			vmw_resource_unreference(&view);
 	}
 
 	return 0;
@@ -638,6 +932,12 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
 
+	if (cmd->body.type >= SVGA3D_RT_MAX) {
+		DRM_ERROR("Illegal render target type %u.\n",
+			  (unsigned) cmd->body.type);
+		return -EINVAL;
+	}
+
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
 				&ctx_node);
@@ -651,13 +951,14 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 		return ret;
 
 	if (dev_priv->has_mob) {
-		struct vmw_ctx_bindinfo bi;
+		struct vmw_ctx_bindinfo_view binding;
 
-		bi.ctx = ctx_node->res;
-		bi.res = res_node ? res_node->res : NULL;
-		bi.bt = vmw_ctx_binding_rt;
-		bi.i1.rt_type = cmd->body.type;
-		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+		binding.bi.ctx = ctx_node->res;
+		binding.bi.res = res_node ? res_node->res : NULL;
+		binding.bi.bt = vmw_ctx_binding_rt;
+		binding.slot = cmd->body.type;
+		vmw_binding_add(ctx_node->staged_bindings,
+				&binding.bi, 0, binding.slot);
 	}
 
 	return 0;
@@ -674,16 +975,62 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
+
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
 				&cmd->body.src.sid, NULL);
-	if (unlikely(ret != 0))
+	if (ret)
 		return ret;
+
 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				 user_surface_converter,
 				 &cmd->body.dest.sid, NULL);
 }
 
+static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBufferCopy body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.src, NULL);
+	if (ret != 0)
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.dest, NULL);
+}
+
+static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXPredCopyRegion body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.srcSid, NULL);
+	if (ret != 0)
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.dstSid, NULL);
+}
+
 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 				     struct vmw_sw_context *sw_context,
 				     SVGA3dCmdHeader *header)
@@ -752,7 +1099,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
  * command batch.
 */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-				       struct ttm_buffer_object *new_query_bo,
+				       struct vmw_dma_buffer *new_query_bo,
 				       struct vmw_sw_context *sw_context)
 {
 	struct vmw_res_cache_entry *ctx_entry =
@@ -764,7 +1111,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
-		if (unlikely(new_query_bo->num_pages > 4)) {
+		if (unlikely(new_query_bo->base.num_pages > 4)) {
 			DRM_ERROR("Query buffer too large.\n");
 			return -EINVAL;
 		}
@@ -833,12 +1180,12 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 
 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
 		if (dev_priv->pinned_bo) {
-			vmw_bo_pin(dev_priv->pinned_bo, false);
-			ttm_bo_unref(&dev_priv->pinned_bo);
+			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
 		}
 
 		if (!sw_context->needs_post_query_barrier) {
-			vmw_bo_pin(sw_context->cur_query_bo, true);
+			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
 
 			/*
 			 * We pin also the dummy_query_bo buffer so that we
@@ -846,14 +1193,17 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 			 * dummy queries in context destroy paths.
 			 */
 
-			vmw_bo_pin(dev_priv->dummy_query_bo, true);
-			dev_priv->dummy_query_bo_pinned = true;
+			if (!dev_priv->dummy_query_bo_pinned) {
+				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
+						    true);
+				dev_priv->dummy_query_bo_pinned = true;
+			}
 
 			BUG_ON(sw_context->last_query_ctx == NULL);
 			dev_priv->query_cid = sw_context->last_query_ctx->id;
 			dev_priv->query_cid_valid = true;
 			dev_priv->pinned_bo =
-				ttm_bo_reference(sw_context->cur_query_bo);
+				vmw_dmabuf_reference(sw_context->cur_query_bo);
 		}
 	}
 }
@@ -882,7 +1232,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
882 struct vmw_dma_buffer **vmw_bo_p) 1232 struct vmw_dma_buffer **vmw_bo_p)
883{ 1233{
884 struct vmw_dma_buffer *vmw_bo = NULL; 1234 struct vmw_dma_buffer *vmw_bo = NULL;
885 struct ttm_buffer_object *bo;
886 uint32_t handle = *id; 1235 uint32_t handle = *id;
887 struct vmw_relocation *reloc; 1236 struct vmw_relocation *reloc;
888 int ret; 1237 int ret;
@@ -893,7 +1242,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
893 ret = -EINVAL; 1242 ret = -EINVAL;
894 goto out_no_reloc; 1243 goto out_no_reloc;
895 } 1244 }
896 bo = &vmw_bo->base;
897 1245
898 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { 1246 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
899 DRM_ERROR("Max number relocations per submission" 1247 DRM_ERROR("Max number relocations per submission"
@@ -906,7 +1254,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
906 reloc->mob_loc = id; 1254 reloc->mob_loc = id;
907 reloc->location = NULL; 1255 reloc->location = NULL;
908 1256
909 ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index); 1257 ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
910 if (unlikely(ret != 0)) 1258 if (unlikely(ret != 0))
911 goto out_no_reloc; 1259 goto out_no_reloc;
912 1260
@@ -944,7 +1292,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
944 struct vmw_dma_buffer **vmw_bo_p) 1292 struct vmw_dma_buffer **vmw_bo_p)
945{ 1293{
946 struct vmw_dma_buffer *vmw_bo = NULL; 1294 struct vmw_dma_buffer *vmw_bo = NULL;
947 struct ttm_buffer_object *bo;
948 uint32_t handle = ptr->gmrId; 1295 uint32_t handle = ptr->gmrId;
949 struct vmw_relocation *reloc; 1296 struct vmw_relocation *reloc;
950 int ret; 1297 int ret;
@@ -955,7 +1302,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
955 ret = -EINVAL; 1302 ret = -EINVAL;
956 goto out_no_reloc; 1303 goto out_no_reloc;
957 } 1304 }
958 bo = &vmw_bo->base;
959 1305
960 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { 1306 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
961 DRM_ERROR("Max number relocations per submission" 1307 DRM_ERROR("Max number relocations per submission"
@@ -967,7 +1313,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
967 reloc = &sw_context->relocs[sw_context->cur_reloc++]; 1313 reloc = &sw_context->relocs[sw_context->cur_reloc++];
968 reloc->location = ptr; 1314 reloc->location = ptr;
969 1315
970 ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index); 1316 ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
971 if (unlikely(ret != 0)) 1317 if (unlikely(ret != 0))
972 goto out_no_reloc; 1318 goto out_no_reloc;
973 1319
@@ -980,6 +1326,98 @@ out_no_reloc:
980 return ret; 1326 return ret;
981} 1327}
982 1328
1329
1330
1331/**
 1332 * vmw_cmd_dx_define_query - Validate an SVGA_3D_CMD_DX_DEFINE_QUERY command.
1333 *
1334 * @dev_priv: Pointer to a device private struct.
1335 * @sw_context: The software context used for this command submission.
1336 * @header: Pointer to the command header in the command stream.
1337 *
 1338 * This function adds the new query to the query COTABLE.
1339 */
1340static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1341 struct vmw_sw_context *sw_context,
1342 SVGA3dCmdHeader *header)
1343{
1344 struct vmw_dx_define_query_cmd {
1345 SVGA3dCmdHeader header;
1346 SVGA3dCmdDXDefineQuery q;
1347 } *cmd;
1348
1349 int ret;
1350 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
1351 struct vmw_resource *cotable_res;
1352
1353
1354 if (ctx_node == NULL) {
1355 DRM_ERROR("DX Context not set for query.\n");
1356 return -EINVAL;
1357 }
1358
1359 cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
1360
1361 if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
1362 cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
1363 return -EINVAL;
1364
1365 cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
1366 ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
1367 vmw_resource_unreference(&cotable_res);
1368
1369 return ret;
1370}
1371
1372
1373
1374/**
 1375 * vmw_cmd_dx_bind_query - Validate an SVGA_3D_CMD_DX_BIND_QUERY command.
1376 *
1377 * @dev_priv: Pointer to a device private struct.
1378 * @sw_context: The software context used for this command submission.
1379 * @header: Pointer to the command header in the command stream.
1380 *
1381 * The query bind operation will eventually associate the query ID
1382 * with its backing MOB. In this function, we take the user mode
1383 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
1384 * kernel mode equivalent.
1385 */
1386static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1387 struct vmw_sw_context *sw_context,
1388 SVGA3dCmdHeader *header)
1389{
1390 struct vmw_dx_bind_query_cmd {
1391 SVGA3dCmdHeader header;
1392 SVGA3dCmdDXBindQuery q;
1393 } *cmd;
1394
1395 struct vmw_dma_buffer *vmw_bo;
1396 int ret;
1397
1398
1399 cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
1400
1401 /*
1402 * Look up the buffer pointed to by q.mobid, put it on the relocation
 1403 * list so its kernel mode MOB ID can be filled in later.
1404 */
1405 ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
1406 &vmw_bo);
1407
1408 if (ret != 0)
1409 return ret;
1410
1411 sw_context->dx_query_mob = vmw_bo;
1412 sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
1413
1414 vmw_dmabuf_unreference(&vmw_bo);
1415
1416 return ret;
1417}
1418
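The record-then-patch flow described above is easiest to see in isolation. Below is a standalone C sketch (not driver code; the reloc struct and the resolved ID are invented for illustration) of the pattern vmw_translate_mob_ptr() and vmw_apply_relocations() implement: remember where the user-space handle sits in the copied command stream, then overwrite that slot with the kernel-resolved ID just before submission.

/* Standalone sketch only -- a simplified stand-in for the driver's
 * relocation machinery. */
#include <stdint.h>
#include <stdio.h>

struct reloc {
	uint32_t *loc;       /* where the handle sits in the stream */
	uint32_t resolved;   /* kernel-resolved ID, known after validation */
};

int main(void)
{
	uint32_t cmd_stream[4] = { 0xbeef, 42, 0, 0 };  /* 42 = user handle */
	struct reloc r = { &cmd_stream[1], 0 };

	/* "Validation": the user handle is looked up and resolved. */
	r.resolved = 7;  /* hypothetical kernel-mode MOB ID */

	/* Apply the relocation just before submission. */
	*r.loc = r.resolved;

	printf("patched id = %u\n", (unsigned)cmd_stream[1]);
	return 0;
}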
1419
1420
983/** 1421/**
984 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command. 1422 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
985 * 1423 *
@@ -1074,7 +1512,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1074 if (unlikely(ret != 0)) 1512 if (unlikely(ret != 0))
1075 return ret; 1513 return ret;
1076 1514
1077 ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); 1515 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1078 1516
1079 vmw_dmabuf_unreference(&vmw_bo); 1517 vmw_dmabuf_unreference(&vmw_bo);
1080 return ret; 1518 return ret;
@@ -1128,7 +1566,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1128 if (unlikely(ret != 0)) 1566 if (unlikely(ret != 0))
1129 return ret; 1567 return ret;
1130 1568
1131 ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); 1569 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1132 1570
1133 vmw_dmabuf_unreference(&vmw_bo); 1571 vmw_dmabuf_unreference(&vmw_bo);
1134 return ret; 1572 return ret;
@@ -1363,6 +1801,12 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1363 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) 1801 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1364 continue; 1802 continue;
1365 1803
1804 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1805 DRM_ERROR("Illegal texture/sampler unit %u.\n",
1806 (unsigned) cur_state->stage);
1807 return -EINVAL;
1808 }
1809
1366 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1810 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1367 user_surface_converter, 1811 user_surface_converter,
1368 &cur_state->value, &res_node); 1812 &cur_state->value, &res_node);
@@ -1370,14 +1814,14 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1370 return ret; 1814 return ret;
1371 1815
1372 if (dev_priv->has_mob) { 1816 if (dev_priv->has_mob) {
1373 struct vmw_ctx_bindinfo bi; 1817 struct vmw_ctx_bindinfo_tex binding;
1374 1818
1375 bi.ctx = ctx_node->res; 1819 binding.bi.ctx = ctx_node->res;
1376 bi.res = res_node ? res_node->res : NULL; 1820 binding.bi.res = res_node ? res_node->res : NULL;
1377 bi.bt = vmw_ctx_binding_tex; 1821 binding.bi.bt = vmw_ctx_binding_tex;
1378 bi.i1.texture_stage = cur_state->stage; 1822 binding.texture_stage = cur_state->stage;
1379 vmw_context_binding_add(ctx_node->staged_bindings, 1823 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1380 &bi); 1824 0, binding.texture_stage);
1381 } 1825 }
1382 } 1826 }
1383 1827
@@ -1407,6 +1851,47 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1407 return ret; 1851 return ret;
1408} 1852}
1409 1853
1854
1855/**
1856 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1857 * switching
1858 *
1859 * @dev_priv: Pointer to a device private struct.
1860 * @sw_context: The software context being used for this batch.
1861 * @val_node: The validation node representing the resource.
1862 * @buf_id: Pointer to the user-space backup buffer handle in the command
1863 * stream.
1864 * @backup_offset: Offset of backup into MOB.
1865 *
1866 * This function prepares for registering a switch of backup buffers
 1867 * in the resource metadata just prior to unreserving. It's the backend of
 1868 * vmw_cmd_switch_backup, operating on an already looked-up validation node.
1869 */
1870static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1871 struct vmw_sw_context *sw_context,
1872 struct vmw_resource_val_node *val_node,
1873 uint32_t *buf_id,
1874 unsigned long backup_offset)
1875{
1876 struct vmw_dma_buffer *dma_buf;
1877 int ret;
1878
1879 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1880 if (ret)
1881 return ret;
1882
1883 val_node->switching_backup = true;
1884 if (val_node->first_usage)
1885 val_node->no_buffer_needed = true;
1886
1887 vmw_dmabuf_unreference(&val_node->new_backup);
1888 val_node->new_backup = dma_buf;
1889 val_node->new_backup_offset = backup_offset;
1890
1891 return 0;
1892}
1893
1894
1410/** 1895/**
1411 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching 1896 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1412 * 1897 *
@@ -1420,7 +1905,8 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1420 * @backup_offset: Offset of backup into MOB. 1905 * @backup_offset: Offset of backup into MOB.
1421 * 1906 *
1422 * This function prepares for registering a switch of backup buffers 1907 * This function prepares for registering a switch of backup buffers
1423 * in the resource metadata just prior to unreserving. 1908 * in the resource metadata just prior to unreserving. It's basically a wrapper
1909 * around vmw_cmd_res_switch_backup with a different interface.
1424 */ 1910 */
1425static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, 1911static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1426 struct vmw_sw_context *sw_context, 1912 struct vmw_sw_context *sw_context,
@@ -1431,27 +1917,16 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1431 uint32_t *buf_id, 1917 uint32_t *buf_id,
1432 unsigned long backup_offset) 1918 unsigned long backup_offset)
1433{ 1919{
1434 int ret;
1435 struct vmw_dma_buffer *dma_buf;
1436 struct vmw_resource_val_node *val_node; 1920 struct vmw_resource_val_node *val_node;
1921 int ret;
1437 1922
1438 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, 1923 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1439 converter, res_id, &val_node); 1924 converter, res_id, &val_node);
1440 if (unlikely(ret != 0)) 1925 if (ret)
1441 return ret;
1442
1443 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1444 if (unlikely(ret != 0))
1445 return ret; 1926 return ret;
1446 1927
1447 if (val_node->first_usage) 1928 return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1448 val_node->no_buffer_needed = true; 1929 buf_id, backup_offset);
1449
1450 vmw_dmabuf_unreference(&val_node->new_backup);
1451 val_node->new_backup = dma_buf;
1452 val_node->new_backup_offset = backup_offset;
1453
1454 return 0;
1455} 1930}
1456 1931
1457/** 1932/**
@@ -1703,10 +2178,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1703 if (unlikely(!dev_priv->has_mob)) 2178 if (unlikely(!dev_priv->has_mob))
1704 return 0; 2179 return 0;
1705 2180
1706 ret = vmw_compat_shader_remove(vmw_context_res_man(val->res), 2181 ret = vmw_shader_remove(vmw_context_res_man(val->res),
1707 cmd->body.shid, 2182 cmd->body.shid,
1708 cmd->body.type, 2183 cmd->body.type,
1709 &sw_context->staged_cmd_res); 2184 &sw_context->staged_cmd_res);
1710 if (unlikely(ret != 0)) 2185 if (unlikely(ret != 0))
1711 return ret; 2186 return ret;
1712 2187
@@ -1734,13 +2209,19 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1734 SVGA3dCmdSetShader body; 2209 SVGA3dCmdSetShader body;
1735 } *cmd; 2210 } *cmd;
1736 struct vmw_resource_val_node *ctx_node, *res_node = NULL; 2211 struct vmw_resource_val_node *ctx_node, *res_node = NULL;
1737 struct vmw_ctx_bindinfo bi; 2212 struct vmw_ctx_bindinfo_shader binding;
1738 struct vmw_resource *res = NULL; 2213 struct vmw_resource *res = NULL;
1739 int ret; 2214 int ret;
1740 2215
1741 cmd = container_of(header, struct vmw_set_shader_cmd, 2216 cmd = container_of(header, struct vmw_set_shader_cmd,
1742 header); 2217 header);
1743 2218
2219 if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2220 DRM_ERROR("Illegal shader type %u.\n",
2221 (unsigned) cmd->body.type);
2222 return -EINVAL;
2223 }
2224
1744 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 2225 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1745 user_context_converter, &cmd->body.cid, 2226 user_context_converter, &cmd->body.cid,
1746 &ctx_node); 2227 &ctx_node);
@@ -1751,14 +2232,12 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1751 return 0; 2232 return 0;
1752 2233
1753 if (cmd->body.shid != SVGA3D_INVALID_ID) { 2234 if (cmd->body.shid != SVGA3D_INVALID_ID) {
1754 res = vmw_compat_shader_lookup 2235 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
1755 (vmw_context_res_man(ctx_node->res), 2236 cmd->body.shid,
1756 cmd->body.shid, 2237 cmd->body.type);
1757 cmd->body.type);
1758 2238
1759 if (!IS_ERR(res)) { 2239 if (!IS_ERR(res)) {
1760 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, 2240 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
1761 vmw_res_shader,
1762 &cmd->body.shid, res, 2241 &cmd->body.shid, res,
1763 &res_node); 2242 &res_node);
1764 vmw_resource_unreference(&res); 2243 vmw_resource_unreference(&res);
@@ -1776,11 +2255,13 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1776 return ret; 2255 return ret;
1777 } 2256 }
1778 2257
1779 bi.ctx = ctx_node->res; 2258 binding.bi.ctx = ctx_node->res;
1780 bi.res = res_node ? res_node->res : NULL; 2259 binding.bi.res = res_node ? res_node->res : NULL;
1781 bi.bt = vmw_ctx_binding_shader; 2260 binding.bi.bt = vmw_ctx_binding_shader;
1782 bi.i1.shader_type = cmd->body.type; 2261 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
1783 return vmw_context_binding_add(ctx_node->staged_bindings, &bi); 2262 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2263 binding.shader_slot, 0);
2264 return 0;
1784} 2265}
1785 2266
1786/** 2267/**
@@ -1842,6 +2323,690 @@ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1842 cmd->body.offsetInBytes); 2323 cmd->body.offsetInBytes);
1843} 2324}
1844 2325
2326/**
2327 * vmw_cmd_dx_set_single_constant_buffer - Validate an
2328 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2329 *
2330 * @dev_priv: Pointer to a device private struct.
2331 * @sw_context: The software context being used for this batch.
2332 * @header: Pointer to the command header in the command stream.
2333 */
2334static int
2335vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2336 struct vmw_sw_context *sw_context,
2337 SVGA3dCmdHeader *header)
2338{
2339 struct {
2340 SVGA3dCmdHeader header;
2341 SVGA3dCmdDXSetSingleConstantBuffer body;
2342 } *cmd;
2343 struct vmw_resource_val_node *res_node = NULL;
2344 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2345 struct vmw_ctx_bindinfo_cb binding;
2346 int ret;
2347
2348 if (unlikely(ctx_node == NULL)) {
2349 DRM_ERROR("DX Context not set.\n");
2350 return -EINVAL;
2351 }
2352
2353 cmd = container_of(header, typeof(*cmd), header);
2354 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2355 user_surface_converter,
2356 &cmd->body.sid, &res_node);
2357 if (unlikely(ret != 0))
2358 return ret;
2359
2360 binding.bi.ctx = ctx_node->res;
2361 binding.bi.res = res_node ? res_node->res : NULL;
2362 binding.bi.bt = vmw_ctx_binding_cb;
2363 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2364 binding.offset = cmd->body.offsetInBytes;
2365 binding.size = cmd->body.sizeInBytes;
2366 binding.slot = cmd->body.slot;
2367
2368 if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2369 binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2370 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2371 (unsigned) cmd->body.type,
2372 (unsigned) binding.slot);
2373 return -EINVAL;
2374 }
2375
2376 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2377 binding.shader_slot, binding.slot);
2378
2379 return 0;
2380}
2381
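All of these checkers walk commands laid out as an SVGA3dCmdHeader immediately followed by a typed body, with header.size counting the body only. A standalone C sketch of building such a command, using simplified stand-in types rather than the real SVGA definitions:

/* Standalone sketch -- 'hdr' and 'set_cb_body' are simplified stand-ins
 * for SVGA3dCmdHeader and SVGA3dCmdDXSetSingleConstantBuffer. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr { uint32_t id; uint32_t size; };
struct set_cb_body {
	uint32_t slot, type, offset_in_bytes, size_in_bytes, sid;
};

int main(void)
{
	struct { struct hdr header; struct set_cb_body body; } cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.header.id = 1200;                /* hypothetical command id */
	cmd.header.size = sizeof(cmd.body);  /* body only, header excluded */
	cmd.body.slot = 0;
	cmd.body.sid = 42;                   /* surface handle the kernel validates */

	printf("bytes on the wire: %zu\n",
	       sizeof(cmd.header) + (size_t)cmd.header.size);
	return 0;
}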
2382/**
2383 * vmw_cmd_dx_set_shader_res - Validate an
2384 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2385 *
2386 * @dev_priv: Pointer to a device private struct.
2387 * @sw_context: The software context being used for this batch.
2388 * @header: Pointer to the command header in the command stream.
2389 */
2390static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2391 struct vmw_sw_context *sw_context,
2392 SVGA3dCmdHeader *header)
2393{
2394 struct {
2395 SVGA3dCmdHeader header;
2396 SVGA3dCmdDXSetShaderResources body;
2397 } *cmd = container_of(header, typeof(*cmd), header);
2398 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2399 sizeof(SVGA3dShaderResourceViewId);
2400
2401 if ((u64) cmd->body.startView + (u64) num_sr_view >
2402 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2403 cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2404 DRM_ERROR("Invalid shader binding.\n");
2405 return -EINVAL;
2406 }
2407
2408 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2409 vmw_ctx_binding_sr,
2410 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2411 (void *) &cmd[1], num_sr_view,
2412 cmd->body.startView);
2413}
2414
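The view count above falls straight out of the header arithmetic: whatever of header.size remains after the fixed body is an array of IDs beginning at &cmd[1], and startView + count is widened to u64 before the range check so a crafted 32-bit value cannot wrap past SVGA3D_DX_MAX_SRVIEWS. The same arithmetic as a standalone C sketch with stand-in types:

/* Standalone sketch of the trailing-array arithmetic; the structs are
 * illustrative stand-ins, not SVGA definitions. */
#include <stdint.h>
#include <stdio.h>

struct hdr  { uint32_t id; uint32_t size; };
struct body { uint32_t type; uint32_t start_view; };

int main(void)
{
	/* Backing storage: header + body + 3 trailing view ids. */
	uint32_t stream[2 + 2 + 3] = { 0, 0, 0, 0, 10, 11, 12 };
	struct cmd { struct hdr header; struct body body; } *cmd =
		(struct cmd *)stream;
	uint32_t num;

	cmd->header.size = sizeof(cmd->body) + 3 * sizeof(uint32_t);
	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(uint32_t);

	/* The ids start immediately after the fixed-size body. */
	printf("num=%u first=%u\n", (unsigned)num,
	       (unsigned)((uint32_t *)&cmd[1])[0]);
	return 0;
}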
2415/**
2416 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2417 * command
2418 *
2419 * @dev_priv: Pointer to a device private struct.
2420 * @sw_context: The software context being used for this batch.
2421 * @header: Pointer to the command header in the command stream.
2422 */
2423static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2424 struct vmw_sw_context *sw_context,
2425 SVGA3dCmdHeader *header)
2426{
2427 struct {
2428 SVGA3dCmdHeader header;
2429 SVGA3dCmdDXSetShader body;
2430 } *cmd;
2431 struct vmw_resource *res = NULL;
2432 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2433 struct vmw_ctx_bindinfo_shader binding;
2434 int ret = 0;
2435
2436 if (unlikely(ctx_node == NULL)) {
2437 DRM_ERROR("DX Context not set.\n");
2438 return -EINVAL;
2439 }
2440
2441 cmd = container_of(header, typeof(*cmd), header);
2442
2443 if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2444 DRM_ERROR("Illegal shader type %u.\n",
2445 (unsigned) cmd->body.type);
2446 return -EINVAL;
2447 }
2448
2449 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2450 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2451 if (IS_ERR(res)) {
2452 DRM_ERROR("Could not find shader for binding.\n");
2453 return PTR_ERR(res);
2454 }
2455
2456 ret = vmw_resource_val_add(sw_context, res, NULL);
2457 if (ret)
2458 goto out_unref;
2459 }
2460
2461 binding.bi.ctx = ctx_node->res;
2462 binding.bi.res = res;
2463 binding.bi.bt = vmw_ctx_binding_dx_shader;
2464 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2465
2466 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2467 binding.shader_slot, 0);
2468out_unref:
2469 if (res)
2470 vmw_resource_unreference(&res);
2471
2472 return ret;
2473}
2474
2475/**
 2476 * vmw_cmd_dx_set_vertex_buffers - Validate an
2477 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2478 *
2479 * @dev_priv: Pointer to a device private struct.
2480 * @sw_context: The software context being used for this batch.
2481 * @header: Pointer to the command header in the command stream.
2482 */
2483static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2484 struct vmw_sw_context *sw_context,
2485 SVGA3dCmdHeader *header)
2486{
2487 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2488 struct vmw_ctx_bindinfo_vb binding;
2489 struct vmw_resource_val_node *res_node;
2490 struct {
2491 SVGA3dCmdHeader header;
2492 SVGA3dCmdDXSetVertexBuffers body;
2493 SVGA3dVertexBuffer buf[];
2494 } *cmd;
2495 int i, ret, num;
2496
2497 if (unlikely(ctx_node == NULL)) {
2498 DRM_ERROR("DX Context not set.\n");
2499 return -EINVAL;
2500 }
2501
2502 cmd = container_of(header, typeof(*cmd), header);
2503 num = (cmd->header.size - sizeof(cmd->body)) /
2504 sizeof(SVGA3dVertexBuffer);
2505 if ((u64)num + (u64)cmd->body.startBuffer >
2506 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2507 DRM_ERROR("Invalid number of vertex buffers.\n");
2508 return -EINVAL;
2509 }
2510
2511 for (i = 0; i < num; i++) {
2512 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2513 user_surface_converter,
2514 &cmd->buf[i].sid, &res_node);
2515 if (unlikely(ret != 0))
2516 return ret;
2517
2518 binding.bi.ctx = ctx_node->res;
2519 binding.bi.bt = vmw_ctx_binding_vb;
2520 binding.bi.res = ((res_node) ? res_node->res : NULL);
2521 binding.offset = cmd->buf[i].offset;
2522 binding.stride = cmd->buf[i].stride;
2523 binding.slot = i + cmd->body.startBuffer;
2524
2525 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2526 0, binding.slot);
2527 }
2528
2529 return 0;
2530}
2531
2532/**
 2533 * vmw_cmd_dx_set_index_buffer - Validate an
 2534 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2535 *
2536 * @dev_priv: Pointer to a device private struct.
2537 * @sw_context: The software context being used for this batch.
2538 * @header: Pointer to the command header in the command stream.
2539 */
2540static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2541 struct vmw_sw_context *sw_context,
2542 SVGA3dCmdHeader *header)
2543{
2544 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2545 struct vmw_ctx_bindinfo_ib binding;
2546 struct vmw_resource_val_node *res_node;
2547 struct {
2548 SVGA3dCmdHeader header;
2549 SVGA3dCmdDXSetIndexBuffer body;
2550 } *cmd;
2551 int ret;
2552
2553 if (unlikely(ctx_node == NULL)) {
2554 DRM_ERROR("DX Context not set.\n");
2555 return -EINVAL;
2556 }
2557
2558 cmd = container_of(header, typeof(*cmd), header);
2559 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2560 user_surface_converter,
2561 &cmd->body.sid, &res_node);
2562 if (unlikely(ret != 0))
2563 return ret;
2564
2565 binding.bi.ctx = ctx_node->res;
2566 binding.bi.res = ((res_node) ? res_node->res : NULL);
2567 binding.bi.bt = vmw_ctx_binding_ib;
2568 binding.offset = cmd->body.offset;
2569 binding.format = cmd->body.format;
2570
2571 vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2572
2573 return 0;
2574}
2575
2576/**
 2577 * vmw_cmd_dx_set_rendertargets - Validate an
2578 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2579 *
2580 * @dev_priv: Pointer to a device private struct.
2581 * @sw_context: The software context being used for this batch.
2582 * @header: Pointer to the command header in the command stream.
2583 */
2584static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2585 struct vmw_sw_context *sw_context,
2586 SVGA3dCmdHeader *header)
2587{
2588 struct {
2589 SVGA3dCmdHeader header;
2590 SVGA3dCmdDXSetRenderTargets body;
2591 } *cmd = container_of(header, typeof(*cmd), header);
2592 int ret;
2593 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2594 sizeof(SVGA3dRenderTargetViewId);
2595
2596 if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2597 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2598 return -EINVAL;
2599 }
2600
2601 ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2602 vmw_ctx_binding_ds, 0,
2603 &cmd->body.depthStencilViewId, 1, 0);
2604 if (ret)
2605 return ret;
2606
2607 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2608 vmw_ctx_binding_dx_rt, 0,
2609 (void *)&cmd[1], num_rt_view, 0);
2610}
2611
2612/**
2613 * vmw_cmd_dx_clear_rendertarget_view - Validate an
2614 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2615 *
2616 * @dev_priv: Pointer to a device private struct.
2617 * @sw_context: The software context being used for this batch.
2618 * @header: Pointer to the command header in the command stream.
2619 */
2620static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2621 struct vmw_sw_context *sw_context,
2622 SVGA3dCmdHeader *header)
2623{
2624 struct {
2625 SVGA3dCmdHeader header;
2626 SVGA3dCmdDXClearRenderTargetView body;
2627 } *cmd = container_of(header, typeof(*cmd), header);
2628
2629 return vmw_view_id_val_add(sw_context, vmw_view_rt,
2630 cmd->body.renderTargetViewId);
2631}
2632
2633/**
 2634 * vmw_cmd_dx_clear_depthstencil_view - Validate an
2635 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2636 *
2637 * @dev_priv: Pointer to a device private struct.
2638 * @sw_context: The software context being used for this batch.
2639 * @header: Pointer to the command header in the command stream.
2640 */
2641static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2642 struct vmw_sw_context *sw_context,
2643 SVGA3dCmdHeader *header)
2644{
2645 struct {
2646 SVGA3dCmdHeader header;
2647 SVGA3dCmdDXClearDepthStencilView body;
2648 } *cmd = container_of(header, typeof(*cmd), header);
2649
2650 return vmw_view_id_val_add(sw_context, vmw_view_ds,
2651 cmd->body.depthStencilViewId);
2652}
2653
2654static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2655 struct vmw_sw_context *sw_context,
2656 SVGA3dCmdHeader *header)
2657{
2658 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2659 struct vmw_resource_val_node *srf_node;
2660 struct vmw_resource *res;
2661 enum vmw_view_type view_type;
2662 int ret;
2663 /*
2664 * This is based on the fact that all affected define commands have
2665 * the same initial command body layout.
2666 */
2667 struct {
2668 SVGA3dCmdHeader header;
2669 uint32 defined_id;
2670 uint32 sid;
2671 } *cmd;
2672
2673 if (unlikely(ctx_node == NULL)) {
2674 DRM_ERROR("DX Context not set.\n");
2675 return -EINVAL;
2676 }
2677
2678 view_type = vmw_view_cmd_to_type(header->id);
2679 cmd = container_of(header, typeof(*cmd), header);
2680 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2681 user_surface_converter,
2682 &cmd->sid, &srf_node);
2683 if (unlikely(ret != 0))
2684 return ret;
2685
2686 res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2687 ret = vmw_cotable_notify(res, cmd->defined_id);
2688 vmw_resource_unreference(&res);
2689 if (unlikely(ret != 0))
2690 return ret;
2691
2692 return vmw_view_add(sw_context->man,
2693 ctx_node->res,
2694 srf_node->res,
2695 view_type,
2696 cmd->defined_id,
2697 header,
2698 header->size + sizeof(*header),
2699 &sw_context->staged_cmd_res);
2700}
2701
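The "same initial command body layout" comment above carries the whole design: every define-view command opens with (defined_id, sid), so a single handler can parse just that shared prefix and ignore the per-view tail. A standalone C sketch of the idiom, with illustrative types:

/* Standalone sketch -- the struct layouts are invented; only the
 * shared-prefix idea mirrors the driver. */
#include <stdint.h>
#include <stdio.h>

struct define_rt_view { uint32_t defined_id, sid, desc[4]; };
struct define_ds_view { uint32_t defined_id, sid, flags; };
struct common_prefix  { uint32_t defined_id, sid; };

/* One handler for every define command, reading only the prefix. */
static void handle_define(const void *body)
{
	const struct common_prefix *p = body;

	printf("view %u on surface %u\n",
	       (unsigned)p->defined_id, (unsigned)p->sid);
}

int main(void)
{
	struct define_rt_view rt = { 1, 42, { 0 } };
	struct define_ds_view ds = { 2, 43, 0 };

	handle_define(&rt);
	handle_define(&ds);
	return 0;
}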
2702/**
2703 * vmw_cmd_dx_set_so_targets - Validate an
2704 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2705 *
2706 * @dev_priv: Pointer to a device private struct.
2707 * @sw_context: The software context being used for this batch.
2708 * @header: Pointer to the command header in the command stream.
2709 */
2710static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2711 struct vmw_sw_context *sw_context,
2712 SVGA3dCmdHeader *header)
2713{
2714 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2715 struct vmw_ctx_bindinfo_so binding;
2716 struct vmw_resource_val_node *res_node;
2717 struct {
2718 SVGA3dCmdHeader header;
2719 SVGA3dCmdDXSetSOTargets body;
2720 SVGA3dSoTarget targets[];
2721 } *cmd;
2722 int i, ret, num;
2723
2724 if (unlikely(ctx_node == NULL)) {
2725 DRM_ERROR("DX Context not set.\n");
2726 return -EINVAL;
2727 }
2728
2729 cmd = container_of(header, typeof(*cmd), header);
2730 num = (cmd->header.size - sizeof(cmd->body)) /
2731 sizeof(SVGA3dSoTarget);
2732
2733 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2734 DRM_ERROR("Invalid DX SO binding.\n");
2735 return -EINVAL;
2736 }
2737
2738 for (i = 0; i < num; i++) {
2739 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2740 user_surface_converter,
2741 &cmd->targets[i].sid, &res_node);
2742 if (unlikely(ret != 0))
2743 return ret;
2744
2745 binding.bi.ctx = ctx_node->res;
2746 binding.bi.res = ((res_node) ? res_node->res : NULL);
 2747 binding.bi.bt = vmw_ctx_binding_so;
2748 binding.offset = cmd->targets[i].offset;
2749 binding.size = cmd->targets[i].sizeInBytes;
2750 binding.slot = i;
2751
2752 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2753 0, binding.slot);
2754 }
2755
2756 return 0;
2757}
2758
2759static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2760 struct vmw_sw_context *sw_context,
2761 SVGA3dCmdHeader *header)
2762{
2763 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2764 struct vmw_resource *res;
2765 /*
2766 * This is based on the fact that all affected define commands have
2767 * the same initial command body layout.
2768 */
2769 struct {
2770 SVGA3dCmdHeader header;
2771 uint32 defined_id;
2772 } *cmd;
2773 enum vmw_so_type so_type;
2774 int ret;
2775
2776 if (unlikely(ctx_node == NULL)) {
2777 DRM_ERROR("DX Context not set.\n");
2778 return -EINVAL;
2779 }
2780
2781 so_type = vmw_so_cmd_to_type(header->id);
2782 res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2783 cmd = container_of(header, typeof(*cmd), header);
2784 ret = vmw_cotable_notify(res, cmd->defined_id);
2785 vmw_resource_unreference(&res);
2786
2787 return ret;
2788}
2789
2790/**
2791 * vmw_cmd_dx_check_subresource - Validate an
2792 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2793 *
2794 * @dev_priv: Pointer to a device private struct.
2795 * @sw_context: The software context being used for this batch.
2796 * @header: Pointer to the command header in the command stream.
2797 */
2798static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2799 struct vmw_sw_context *sw_context,
2800 SVGA3dCmdHeader *header)
2801{
2802 struct {
2803 SVGA3dCmdHeader header;
2804 union {
2805 SVGA3dCmdDXReadbackSubResource r_body;
2806 SVGA3dCmdDXInvalidateSubResource i_body;
2807 SVGA3dCmdDXUpdateSubResource u_body;
2808 SVGA3dSurfaceId sid;
2809 };
2810 } *cmd;
2811
2812 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2813 offsetof(typeof(*cmd), sid));
2814 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2815 offsetof(typeof(*cmd), sid));
2816 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2817 offsetof(typeof(*cmd), sid));
2818
2819 cmd = container_of(header, typeof(*cmd), header);
2820
2821 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2822 user_surface_converter,
2823 &cmd->sid, NULL);
2824}
2825
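The BUILD_BUG_ON() lines are what make the union overlay safe: they prove at compile time that sid lands at the same offset in every variant, so the single vmw_cmd_res_check() call reads the right field regardless of which of the three commands arrived. The same check as a standalone C11 sketch using static_assert:

/* Standalone sketch of the compile-time layout check; the command
 * structs are simplified stand-ins. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct readback   { uint32_t sid, sub_resource; };
struct invalidate { uint32_t sid; };

struct probe {
	union {
		struct readback   r_body;
		struct invalidate i_body;
		uint32_t sid;
	};
};

static_assert(offsetof(struct probe, r_body.sid) ==
	      offsetof(struct probe, sid), "sid offset mismatch");
static_assert(offsetof(struct probe, i_body.sid) ==
	      offsetof(struct probe, sid), "sid offset mismatch");

int main(void)
{
	struct probe p = { .sid = 42 };

	/* Any variant reads the same storage. */
	printf("sid via readback view: %u\n", (unsigned)p.r_body.sid);
	return 0;
}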
2826static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2827 struct vmw_sw_context *sw_context,
2828 SVGA3dCmdHeader *header)
2829{
2830 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2831
2832 if (unlikely(ctx_node == NULL)) {
2833 DRM_ERROR("DX Context not set.\n");
2834 return -EINVAL;
2835 }
2836
2837 return 0;
2838}
2839
2840/**
 2841 * vmw_cmd_dx_view_remove - Validate a view remove command and
2842 * schedule the view resource for removal.
2843 *
2844 * @dev_priv: Pointer to a device private struct.
2845 * @sw_context: The software context being used for this batch.
2846 * @header: Pointer to the command header in the command stream.
2847 *
2848 * Check that the view exists, and if it was not created using this
2849 * command batch, make sure it's validated (present in the device) so that
2850 * the remove command will not confuse the device.
2851 */
2852static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2853 struct vmw_sw_context *sw_context,
2854 SVGA3dCmdHeader *header)
2855{
2856 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2857 struct {
2858 SVGA3dCmdHeader header;
2859 union vmw_view_destroy body;
2860 } *cmd = container_of(header, typeof(*cmd), header);
2861 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2862 struct vmw_resource *view;
2863 int ret;
2864
2865 if (!ctx_node) {
2866 DRM_ERROR("DX Context not set.\n");
2867 return -EINVAL;
2868 }
2869
2870 ret = vmw_view_remove(sw_context->man,
2871 cmd->body.view_id, view_type,
2872 &sw_context->staged_cmd_res,
2873 &view);
2874 if (ret || !view)
2875 return ret;
2876
2877 /*
2878 * Add view to the validate list iff it was not created using this
2879 * command batch.
2880 */
2881 return vmw_view_res_val_add(sw_context, view);
2882}
2883
2884/**
2885 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2886 * command
2887 *
2888 * @dev_priv: Pointer to a device private struct.
2889 * @sw_context: The software context being used for this batch.
2890 * @header: Pointer to the command header in the command stream.
2891 */
2892static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2893 struct vmw_sw_context *sw_context,
2894 SVGA3dCmdHeader *header)
2895{
2896 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2897 struct vmw_resource *res;
2898 struct {
2899 SVGA3dCmdHeader header;
2900 SVGA3dCmdDXDefineShader body;
2901 } *cmd = container_of(header, typeof(*cmd), header);
2902 int ret;
2903
2904 if (!ctx_node) {
2905 DRM_ERROR("DX Context not set.\n");
2906 return -EINVAL;
2907 }
2908
2909 res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2910 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2911 vmw_resource_unreference(&res);
2912 if (ret)
2913 return ret;
2914
2915 return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2916 cmd->body.shaderId, cmd->body.type,
2917 &sw_context->staged_cmd_res);
2918}
2919
2920/**
2921 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2922 * command
2923 *
2924 * @dev_priv: Pointer to a device private struct.
2925 * @sw_context: The software context being used for this batch.
2926 * @header: Pointer to the command header in the command stream.
2927 */
2928static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2929 struct vmw_sw_context *sw_context,
2930 SVGA3dCmdHeader *header)
2931{
2932 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2933 struct {
2934 SVGA3dCmdHeader header;
2935 SVGA3dCmdDXDestroyShader body;
2936 } *cmd = container_of(header, typeof(*cmd), header);
2937 int ret;
2938
2939 if (!ctx_node) {
2940 DRM_ERROR("DX Context not set.\n");
2941 return -EINVAL;
2942 }
2943
2944 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2945 &sw_context->staged_cmd_res);
2946 if (ret)
2947 DRM_ERROR("Could not find shader to remove.\n");
2948
2949 return ret;
2950}
2951
2952/**
2953 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
2954 * command
2955 *
2956 * @dev_priv: Pointer to a device private struct.
2957 * @sw_context: The software context being used for this batch.
2958 * @header: Pointer to the command header in the command stream.
2959 */
2960static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2961 struct vmw_sw_context *sw_context,
2962 SVGA3dCmdHeader *header)
2963{
2964 struct vmw_resource_val_node *ctx_node;
2965 struct vmw_resource_val_node *res_node;
2966 struct vmw_resource *res;
2967 struct {
2968 SVGA3dCmdHeader header;
2969 SVGA3dCmdDXBindShader body;
2970 } *cmd = container_of(header, typeof(*cmd), header);
2971 int ret;
2972
2973 if (cmd->body.cid != SVGA3D_INVALID_ID) {
2974 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2975 user_context_converter,
2976 &cmd->body.cid, &ctx_node);
2977 if (ret)
2978 return ret;
2979 } else {
2980 ctx_node = sw_context->dx_ctx_node;
2981 if (!ctx_node) {
2982 DRM_ERROR("DX Context not set.\n");
2983 return -EINVAL;
2984 }
2985 }
2986
2987 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2988 cmd->body.shid, 0);
2989 if (IS_ERR(res)) {
2990 DRM_ERROR("Could not find shader to bind.\n");
2991 return PTR_ERR(res);
2992 }
2993
2994 ret = vmw_resource_val_add(sw_context, res, &res_node);
2995 if (ret) {
2996 DRM_ERROR("Error creating resource validation node.\n");
2997 goto out_unref;
2998 }
2999
3000
3001 ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3002 &cmd->body.mobid,
3003 cmd->body.offsetInBytes);
3004out_unref:
3005 vmw_resource_unreference(&res);
3006
3007 return ret;
3008}
3009
1845static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, 3010static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1846 struct vmw_sw_context *sw_context, 3011 struct vmw_sw_context *sw_context,
1847 void *buf, uint32_t *size) 3012 void *buf, uint32_t *size)
@@ -1849,7 +3014,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1849 uint32_t size_remaining = *size; 3014 uint32_t size_remaining = *size;
1850 uint32_t cmd_id; 3015 uint32_t cmd_id;
1851 3016
1852 cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); 3017 cmd_id = ((uint32_t *)buf)[0];
1853 switch (cmd_id) { 3018 switch (cmd_id) {
1854 case SVGA_CMD_UPDATE: 3019 case SVGA_CMD_UPDATE:
1855 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate); 3020 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
@@ -1980,7 +3145,7 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1980 false, false, true), 3145 false, false, true),
1981 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid, 3146 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1982 false, false, true), 3147 false, false, true),
1983 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid, 3148 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
1984 false, false, true), 3149 false, false, true),
1985 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid, 3150 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1986 false, false, true), 3151 false, false, true),
@@ -2051,7 +3216,147 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
2051 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, 3216 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2052 false, false, true), 3217 false, false, true),
2053 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, 3218 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
2054 true, false, true) 3219 true, false, true),
3220 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3221 false, false, true),
3222 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3223 false, false, true),
3224 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3225 false, false, true),
3226 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3227 false, false, true),
3228
3229 /*
3230 * DX commands
3231 */
3232 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3233 false, false, true),
3234 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3235 false, false, true),
3236 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3237 false, false, true),
3238 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3239 false, false, true),
3240 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3241 false, false, true),
3242 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3243 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3244 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3245 &vmw_cmd_dx_set_shader_res, true, false, true),
3246 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3247 true, false, true),
3248 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3249 true, false, true),
3250 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3251 true, false, true),
3252 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3253 true, false, true),
3254 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3255 true, false, true),
3256 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3257 &vmw_cmd_dx_cid_check, true, false, true),
3258 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3259 true, false, true),
3260 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3261 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3262 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3263 &vmw_cmd_dx_set_index_buffer, true, false, true),
3264 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3265 &vmw_cmd_dx_set_rendertargets, true, false, true),
3266 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3267 true, false, true),
3268 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3269 &vmw_cmd_dx_cid_check, true, false, true),
3270 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3271 &vmw_cmd_dx_cid_check, true, false, true),
3272 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3273 true, false, true),
3274 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
3275 true, false, true),
3276 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3277 true, false, true),
3278 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3279 &vmw_cmd_ok, true, false, true),
3280 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
3281 true, false, true),
3282 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
3283 true, false, true),
3284 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3285 true, false, true),
3286 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
3287 true, false, true),
3288 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3289 true, false, true),
3290 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3291 true, false, true),
3292 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3293 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3294 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3295 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3296 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3297 true, false, true),
3298 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
3299 true, false, true),
3300 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3301 &vmw_cmd_dx_check_subresource, true, false, true),
3302 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3303 &vmw_cmd_dx_check_subresource, true, false, true),
3304 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3305 &vmw_cmd_dx_check_subresource, true, false, true),
3306 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3307 &vmw_cmd_dx_view_define, true, false, true),
3308 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3309 &vmw_cmd_dx_view_remove, true, false, true),
3310 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3311 &vmw_cmd_dx_view_define, true, false, true),
3312 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3313 &vmw_cmd_dx_view_remove, true, false, true),
3314 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3315 &vmw_cmd_dx_view_define, true, false, true),
3316 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3317 &vmw_cmd_dx_view_remove, true, false, true),
3318 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3319 &vmw_cmd_dx_so_define, true, false, true),
3320 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3321 &vmw_cmd_dx_cid_check, true, false, true),
3322 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3323 &vmw_cmd_dx_so_define, true, false, true),
3324 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3325 &vmw_cmd_dx_cid_check, true, false, true),
3326 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3327 &vmw_cmd_dx_so_define, true, false, true),
3328 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3329 &vmw_cmd_dx_cid_check, true, false, true),
3330 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3331 &vmw_cmd_dx_so_define, true, false, true),
3332 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3333 &vmw_cmd_dx_cid_check, true, false, true),
3334 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3335 &vmw_cmd_dx_so_define, true, false, true),
3336 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3337 &vmw_cmd_dx_cid_check, true, false, true),
3338 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3339 &vmw_cmd_dx_define_shader, true, false, true),
3340 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3341 &vmw_cmd_dx_destroy_shader, true, false, true),
3342 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3343 &vmw_cmd_dx_bind_shader, true, false, true),
3344 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3345 &vmw_cmd_dx_so_define, true, false, true),
3346 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3347 &vmw_cmd_dx_cid_check, true, false, true),
3348 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3349 true, false, true),
3350 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3351 &vmw_cmd_dx_set_so_targets, true, false, true),
3352 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3353 &vmw_cmd_dx_cid_check, true, false, true),
3354 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3355 &vmw_cmd_dx_cid_check, true, false, true),
3356 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3357 &vmw_cmd_buffer_copy_check, true, false, true),
3358 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3359 &vmw_cmd_pred_copy_check, true, false, true),
2055}; 3360};
2056 3361
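A reading aid for the table that ends above: each VMW_CMD_DEF row maps one SVGA command id to its checker plus three permission flags. Assuming the macro expands to a designated array initializer keyed on (id - SVGA_3D_CMD_BASE), the usual shape for such kernel dispatch tables (the driver's exact macro is outside this hunk), a standalone analogue looks like:

/* Standalone analogue -- command names, flag meanings and the macro
 * body here are assumptions for illustration only. */
#include <stdbool.h>
#include <stdio.h>

#define CMD_BASE 1000
#define CMD_FOO  1000
#define CMD_BAR  1001
#define CMD_MAX  1002

typedef int (*cmd_func)(void);
struct cmd_entry {
	cmd_func func;
	bool user_allow, gb_disable, gb_enable;
};

#define CMD_DEF(_cmd, _func, _user, _gbd, _gbe) \
	[(_cmd) - CMD_BASE] = { (_func), (_user), (_gbd), (_gbe) }

static int do_foo(void) { return 0; }
static int do_bar(void) { return 0; }

static const struct cmd_entry entries[CMD_MAX - CMD_BASE] = {
	CMD_DEF(CMD_FOO, do_foo, true, false, true),
	CMD_DEF(CMD_BAR, do_bar, true, false, true),
};

int main(void)
{
	unsigned int id = CMD_BAR;
	const struct cmd_entry *e = &entries[id - CMD_BASE];

	printf("user_allow=%d ret=%d\n", e->user_allow, e->func());
	return 0;
}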
2057static int vmw_cmd_check(struct vmw_private *dev_priv, 3362static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -2065,14 +3370,14 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
2065 const struct vmw_cmd_entry *entry; 3370 const struct vmw_cmd_entry *entry;
2066 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS; 3371 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
2067 3372
2068 cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); 3373 cmd_id = ((uint32_t *)buf)[0];
2069 /* Handle any non-3D commands */ 3374 /* Handle any non-3D commands */
2070 if (unlikely(cmd_id < SVGA_CMD_MAX)) 3375 if (unlikely(cmd_id < SVGA_CMD_MAX))
2071 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size); 3376 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
2072 3377
2073 3378
2074 cmd_id = le32_to_cpu(header->id); 3379 cmd_id = header->id;
2075 *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader); 3380 *size = header->size + sizeof(SVGA3dCmdHeader);
2076 3381
2077 cmd_id -= SVGA_3D_CMD_BASE; 3382 cmd_id -= SVGA_3D_CMD_BASE;
2078 if (unlikely(*size > size_remaining)) 3383 if (unlikely(*size > size_remaining))
@@ -2184,7 +3489,8 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
2184 * 3489 *
2185 * @list: The resource list. 3490 * @list: The resource list.
2186 */ 3491 */
2187static void vmw_resource_list_unreference(struct list_head *list) 3492static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3493 struct list_head *list)
2188{ 3494{
2189 struct vmw_resource_val_node *val, *val_next; 3495 struct vmw_resource_val_node *val, *val_next;
2190 3496
@@ -2195,8 +3501,15 @@ static void vmw_resource_list_unreference(struct list_head *list)
2195 list_for_each_entry_safe(val, val_next, list, head) { 3501 list_for_each_entry_safe(val, val_next, list, head) {
2196 list_del_init(&val->head); 3502 list_del_init(&val->head);
2197 vmw_resource_unreference(&val->res); 3503 vmw_resource_unreference(&val->res);
2198 if (unlikely(val->staged_bindings)) 3504
2199 kfree(val->staged_bindings); 3505 if (val->staged_bindings) {
3506 if (val->staged_bindings != sw_context->staged_bindings)
3507 vmw_binding_state_free(val->staged_bindings);
3508 else
3509 sw_context->staged_bindings_inuse = false;
3510 val->staged_bindings = NULL;
3511 }
3512
2200 kfree(val); 3513 kfree(val);
2201 } 3514 }
2202} 3515}
@@ -2222,24 +3535,21 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
2222 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash); 3535 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
2223} 3536}
2224 3537
2225static int vmw_validate_single_buffer(struct vmw_private *dev_priv, 3538int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2226 struct ttm_buffer_object *bo, 3539 struct ttm_buffer_object *bo,
2227 bool validate_as_mob) 3540 bool interruptible,
3541 bool validate_as_mob)
2228{ 3542{
3543 struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3544 base);
2229 int ret; 3545 int ret;
2230 3546
2231 3547 if (vbo->pin_count > 0)
2232 /*
2233 * Don't validate pinned buffers.
2234 */
2235
2236 if (bo == dev_priv->pinned_bo ||
2237 (bo == dev_priv->dummy_query_bo &&
2238 dev_priv->dummy_query_bo_pinned))
2239 return 0; 3548 return 0;
2240 3549
2241 if (validate_as_mob) 3550 if (validate_as_mob)
2242 return ttm_bo_validate(bo, &vmw_mob_placement, true, false); 3551 return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
3552 false);
2243 3553
2244 /** 3554 /**
2245 * Put BO in VRAM if there is space, otherwise as a GMR. 3555 * Put BO in VRAM if there is space, otherwise as a GMR.
@@ -2248,7 +3558,8 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2248 * used as a GMR, this will return -ENOMEM. 3558 * used as a GMR, this will return -ENOMEM.
2249 */ 3559 */
2250 3560
2251 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false); 3561 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
3562 false);
2252 if (likely(ret == 0 || ret == -ERESTARTSYS)) 3563 if (likely(ret == 0 || ret == -ERESTARTSYS))
2253 return ret; 3564 return ret;
2254 3565
@@ -2257,8 +3568,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2257 * previous contents. 3568 * previous contents.
2258 */ 3569 */
2259 3570
2260 DRM_INFO("Falling through to VRAM.\n"); 3571 ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
2261 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
2262 return ret; 3572 return ret;
2263} 3573}
2264 3574
@@ -2270,6 +3580,7 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
2270 3580
2271 list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { 3581 list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
2272 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo, 3582 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3583 true,
2273 entry->validate_as_mob); 3584 entry->validate_as_mob);
2274 if (unlikely(ret != 0)) 3585 if (unlikely(ret != 0))
2275 return ret; 3586 return ret;
@@ -2417,7 +3728,164 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2417 } 3728 }
2418} 3729}
2419 3730
3731/**
3732 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3733 * the fifo.
3734 *
3735 * @dev_priv: Pointer to a device private structure.
3736 * @kernel_commands: Pointer to the unpatched command batch.
3737 * @command_size: Size of the unpatched command batch.
3738 * @sw_context: Structure holding the relocation lists.
3739 *
3740 * Side effects: If this function returns 0, then the command batch
3741 * pointed to by @kernel_commands will have been modified.
3742 */
3743static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3744 void *kernel_commands,
3745 u32 command_size,
3746 struct vmw_sw_context *sw_context)
3747{
3748 void *cmd;
3749
3750 if (sw_context->dx_ctx_node)
3751 cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3752 sw_context->dx_ctx_node->res->id);
3753 else
3754 cmd = vmw_fifo_reserve(dev_priv, command_size);
3755 if (!cmd) {
3756 DRM_ERROR("Failed reserving fifo space for commands.\n");
3757 return -ENOMEM;
3758 }
3759
3760 vmw_apply_relocations(sw_context);
3761 memcpy(cmd, kernel_commands, command_size);
3762 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3763 vmw_resource_relocations_free(&sw_context->res_relocations);
3764 vmw_fifo_commit(dev_priv, command_size);
3765
3766 return 0;
3767}
3768
3769/**
3770 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3771 * the command buffer manager.
3772 *
3773 * @dev_priv: Pointer to a device private structure.
3774 * @header: Opaque handle to the command buffer allocation.
3775 * @command_size: Size of the unpatched command batch.
3776 * @sw_context: Structure holding the relocation lists.
3777 *
3778 * Side effects: If this function returns 0, then the command buffer
3779 * represented by @header will have been modified.
3780 */
3781static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3782 struct vmw_cmdbuf_header *header,
3783 u32 command_size,
3784 struct vmw_sw_context *sw_context)
3785{
3786 u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
3787 SVGA3D_INVALID_ID);
3788 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3789 id, false, header);
3790
3791 vmw_apply_relocations(sw_context);
3792 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3793 vmw_resource_relocations_free(&sw_context->res_relocations);
3794 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3795
3796 return 0;
3797}
3798
3799/**
3800 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3801 * submission using a command buffer.
3802 *
3803 * @dev_priv: Pointer to a device private structure.
3804 * @user_commands: User-space pointer to the commands to be submitted.
3805 * @command_size: Size of the unpatched command batch.
3806 * @header: Out parameter returning the opaque pointer to the command buffer.
3807 *
3808 * This function checks whether we can use the command buffer manager for
3809 * submission and if so, creates a command buffer of suitable size and
3810 * copies the user data into that buffer.
3811 *
3812 * On successful return, the function returns a pointer to the data in the
3813 * command buffer and *@header is set to non-NULL.
 3814 * If command buffers could not be used, the function returns the value of
 3815 * @kernel_commands passed in on the call. That value may be NULL, in which
 3816 * case *@header is also set to NULL.
 3817 * If an error is encountered, the function returns an error pointer.
 3818 * If the function is interrupted by a signal while sleeping, it returns
 3819 * -ERESTARTSYS cast to an error pointer.
3820 */
3821static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3822 void __user *user_commands,
3823 void *kernel_commands,
3824 u32 command_size,
3825 struct vmw_cmdbuf_header **header)
3826{
3827 size_t cmdbuf_size;
3828 int ret;
3829
3830 *header = NULL;
3831 if (!dev_priv->cman || kernel_commands)
3832 return kernel_commands;
3833
3834 if (command_size > SVGA_CB_MAX_SIZE) {
3835 DRM_ERROR("Command buffer is too large.\n");
3836 return ERR_PTR(-EINVAL);
3837 }
3838
3839 /* If possible, add a little space for fencing. */
3840 cmdbuf_size = command_size + 512;
3841 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3842 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
3843 true, header);
3844 if (IS_ERR(kernel_commands))
3845 return kernel_commands;
3846
3847 ret = copy_from_user(kernel_commands, user_commands,
3848 command_size);
3849 if (ret) {
3850 DRM_ERROR("Failed copying commands.\n");
3851 vmw_cmdbuf_header_free(*header);
3852 *header = NULL;
3853 return ERR_PTR(-EFAULT);
3854 }
3855
3856 return kernel_commands;
3857}
3858
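The error-pointer convention in the comment above is the kernel's ERR_PTR()/IS_ERR() scheme: small negative errno values are encoded into the returned pointer, so a single return channel carries either a valid buffer or an error code. A standalone sketch with simplified stand-ins for the kernel helpers:

/* Standalone sketch -- err_ptr()/is_err()/ptr_err() are simplified
 * stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err)      { return (void *)(intptr_t)err; }
static int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
static long  ptr_err(const void *p) { return (long)(intptr_t)p; }

static void *alloc_cmdbuf(int interrupted)
{
	static char buf[512];

	if (interrupted)
		return err_ptr(-512);  /* stand-in for -ERESTARTSYS */
	return buf;
}

int main(void)
{
	void *p = alloc_cmdbuf(1);

	if (is_err(p)) {
		printf("failed: %ld\n", ptr_err(p));
		return 1;
	}
	return 0;
}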
3859static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3860 struct vmw_sw_context *sw_context,
3861 uint32_t handle)
3862{
3863 struct vmw_resource_val_node *ctx_node;
3864 struct vmw_resource *res;
3865 int ret;
3866
3867 if (handle == SVGA3D_INVALID_ID)
3868 return 0;
2420 3869
3870 ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
3871 handle, user_context_converter,
3872 &res);
3873 if (unlikely(ret != 0)) {
 3874 DRM_ERROR("Could not find or use DX context 0x%08x.\n",
3875 (unsigned) handle);
3876 return ret;
3877 }
3878
3879 ret = vmw_resource_val_add(sw_context, res, &ctx_node);
3880 if (unlikely(ret != 0))
3881 goto out_err;
3882
3883 sw_context->dx_ctx_node = ctx_node;
3884 sw_context->man = vmw_context_res_man(res);
3885out_err:
3886 vmw_resource_unreference(&res);
3887 return ret;
3888}
2421 3889
2422int vmw_execbuf_process(struct drm_file *file_priv, 3890int vmw_execbuf_process(struct drm_file *file_priv,
2423 struct vmw_private *dev_priv, 3891 struct vmw_private *dev_priv,
@@ -2425,6 +3893,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2425 void *kernel_commands, 3893 void *kernel_commands,
2426 uint32_t command_size, 3894 uint32_t command_size,
2427 uint64_t throttle_us, 3895 uint64_t throttle_us,
3896 uint32_t dx_context_handle,
2428 struct drm_vmw_fence_rep __user *user_fence_rep, 3897 struct drm_vmw_fence_rep __user *user_fence_rep,
2429 struct vmw_fence_obj **out_fence) 3898 struct vmw_fence_obj **out_fence)
2430{ 3899{
@@ -2432,18 +3901,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2432 struct vmw_fence_obj *fence = NULL; 3901 struct vmw_fence_obj *fence = NULL;
2433 struct vmw_resource *error_resource; 3902 struct vmw_resource *error_resource;
2434 struct list_head resource_list; 3903 struct list_head resource_list;
3904 struct vmw_cmdbuf_header *header;
2435 struct ww_acquire_ctx ticket; 3905 struct ww_acquire_ctx ticket;
2436 uint32_t handle; 3906 uint32_t handle;
2437 void *cmd;
2438 int ret; 3907 int ret;
2439 3908
3909 if (throttle_us) {
3910 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3911 throttle_us);
3912
3913 if (ret)
3914 return ret;
3915 }
3916
3917 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3918 kernel_commands, command_size,
3919 &header);
3920 if (IS_ERR(kernel_commands))
3921 return PTR_ERR(kernel_commands);
3922
2440 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); 3923 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2441 if (unlikely(ret != 0)) 3924 if (ret) {
2442 return -ERESTARTSYS; 3925 ret = -ERESTARTSYS;
3926 goto out_free_header;
3927 }
2443 3928
3929 sw_context->kernel = false;
2444 if (kernel_commands == NULL) { 3930 if (kernel_commands == NULL) {
2445 sw_context->kernel = false;
2446
2447 ret = vmw_resize_cmd_bounce(sw_context, command_size); 3931 ret = vmw_resize_cmd_bounce(sw_context, command_size);
2448 if (unlikely(ret != 0)) 3932 if (unlikely(ret != 0))
2449 goto out_unlock; 3933 goto out_unlock;
@@ -2458,19 +3942,26 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2458 goto out_unlock; 3942 goto out_unlock;
2459 } 3943 }
2460 kernel_commands = sw_context->cmd_bounce; 3944 kernel_commands = sw_context->cmd_bounce;
2461 } else 3945 } else if (!header)
2462 sw_context->kernel = true; 3946 sw_context->kernel = true;
2463 3947
2464 sw_context->fp = vmw_fpriv(file_priv); 3948 sw_context->fp = vmw_fpriv(file_priv);
2465 sw_context->cur_reloc = 0; 3949 sw_context->cur_reloc = 0;
2466 sw_context->cur_val_buf = 0; 3950 sw_context->cur_val_buf = 0;
2467 INIT_LIST_HEAD(&sw_context->resource_list); 3951 INIT_LIST_HEAD(&sw_context->resource_list);
3952 INIT_LIST_HEAD(&sw_context->ctx_resource_list);
2468 sw_context->cur_query_bo = dev_priv->pinned_bo; 3953 sw_context->cur_query_bo = dev_priv->pinned_bo;
2469 sw_context->last_query_ctx = NULL; 3954 sw_context->last_query_ctx = NULL;
2470 sw_context->needs_post_query_barrier = false; 3955 sw_context->needs_post_query_barrier = false;
3956 sw_context->dx_ctx_node = NULL;
3957 sw_context->dx_query_mob = NULL;
3958 sw_context->dx_query_ctx = NULL;
2471 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); 3959 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2472 INIT_LIST_HEAD(&sw_context->validate_nodes); 3960 INIT_LIST_HEAD(&sw_context->validate_nodes);
2473 INIT_LIST_HEAD(&sw_context->res_relocations); 3961 INIT_LIST_HEAD(&sw_context->res_relocations);
3962 if (sw_context->staged_bindings)
3963 vmw_binding_state_reset(sw_context->staged_bindings);
3964
2474 if (!sw_context->res_ht_initialized) { 3965 if (!sw_context->res_ht_initialized) {
2475 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); 3966 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2476 if (unlikely(ret != 0)) 3967 if (unlikely(ret != 0))
@@ -2478,10 +3969,24 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2478 sw_context->res_ht_initialized = true; 3969 sw_context->res_ht_initialized = true;
2479 } 3970 }
2480 INIT_LIST_HEAD(&sw_context->staged_cmd_res); 3971 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
2481
2482 INIT_LIST_HEAD(&resource_list); 3972 INIT_LIST_HEAD(&resource_list);
3973 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
3974 if (unlikely(ret != 0)) {
3975 list_splice_init(&sw_context->ctx_resource_list,
3976 &sw_context->resource_list);
3977 goto out_err_nores;
3978 }
3979
2483 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, 3980 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2484 command_size); 3981 command_size);
3982 /*
3983 * Merge the resource lists before checking the return status
3984 * from vmw_cmd_check_all so that all the open hashtabs will
3985 * be handled properly even if vmw_cmd_check_all fails.
3986 */
3987 list_splice_init(&sw_context->ctx_resource_list,
3988 &sw_context->resource_list);
3989
2485 if (unlikely(ret != 0)) 3990 if (unlikely(ret != 0))
2486 goto out_err_nores; 3991 goto out_err_nores;
2487 3992
@@ -2492,7 +3997,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2492 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, 3997 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
2493 true, NULL); 3998 true, NULL);
2494 if (unlikely(ret != 0)) 3999 if (unlikely(ret != 0))
2495 goto out_err; 4000 goto out_err_nores;
2496 4001
2497 ret = vmw_validate_buffers(dev_priv, sw_context); 4002 ret = vmw_validate_buffers(dev_priv, sw_context);
2498 if (unlikely(ret != 0)) 4003 if (unlikely(ret != 0))
@@ -2502,14 +4007,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2502 if (unlikely(ret != 0)) 4007 if (unlikely(ret != 0))
2503 goto out_err; 4008 goto out_err;
2504 4009
2505 if (throttle_us) {
2506 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2507 throttle_us);
2508
2509 if (unlikely(ret != 0))
2510 goto out_err;
2511 }
2512
2513 ret = mutex_lock_interruptible(&dev_priv->binding_mutex); 4010 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
2514 if (unlikely(ret != 0)) { 4011 if (unlikely(ret != 0)) {
2515 ret = -ERESTARTSYS; 4012 ret = -ERESTARTSYS;
@@ -2522,20 +4019,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2522 goto out_unlock_binding; 4019 goto out_unlock_binding;
2523 } 4020 }
2524 4021
2525 cmd = vmw_fifo_reserve(dev_priv, command_size); 4022 if (!header) {
2526 if (unlikely(cmd == NULL)) { 4023 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
2527 DRM_ERROR("Failed reserving fifo space for commands.\n"); 4024 command_size, sw_context);
2528 ret = -ENOMEM; 4025 } else {
2529 goto out_unlock_binding; 4026 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4027 sw_context);
4028 header = NULL;
2530 } 4029 }
2531 4030 mutex_unlock(&dev_priv->binding_mutex);
2532 vmw_apply_relocations(sw_context); 4031 if (ret)
2533 memcpy(cmd, kernel_commands, command_size); 4032 goto out_err;
2534
2535 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2536 vmw_resource_relocations_free(&sw_context->res_relocations);
2537
2538 vmw_fifo_commit(dev_priv, command_size);
2539 4033
2540 vmw_query_bo_switch_commit(dev_priv, sw_context); 4034 vmw_query_bo_switch_commit(dev_priv, sw_context);
2541 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, 4035 ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2550,8 +4044,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2550 if (ret != 0) 4044 if (ret != 0)
2551 DRM_ERROR("Fence submission error. Syncing.\n"); 4045 DRM_ERROR("Fence submission error. Syncing.\n");
2552 4046
2553 vmw_resource_list_unreserve(&sw_context->resource_list, false); 4047 vmw_resources_unreserve(sw_context, false);
2554 mutex_unlock(&dev_priv->binding_mutex);
2555 4048
2556 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, 4049 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2557 (void *) fence); 4050 (void *) fence);
@@ -2580,7 +4073,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2580 * Unreference resources outside of the cmdbuf_mutex to 4073 * Unreference resources outside of the cmdbuf_mutex to
2581 * avoid deadlocks in resource destruction paths. 4074 * avoid deadlocks in resource destruction paths.
2582 */ 4075 */
2583 vmw_resource_list_unreference(&resource_list); 4076 vmw_resource_list_unreference(sw_context, &resource_list);
2584 4077
2585 return 0; 4078 return 0;
2586 4079
@@ -2589,7 +4082,7 @@ out_unlock_binding:
2589out_err: 4082out_err:
2590 ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); 4083 ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2591out_err_nores: 4084out_err_nores:
2592 vmw_resource_list_unreserve(&sw_context->resource_list, true); 4085 vmw_resources_unreserve(sw_context, true);
2593 vmw_resource_relocations_free(&sw_context->res_relocations); 4086 vmw_resource_relocations_free(&sw_context->res_relocations);
2594 vmw_free_relocations(sw_context); 4087 vmw_free_relocations(sw_context);
2595 vmw_clear_validations(sw_context); 4088 vmw_clear_validations(sw_context);
@@ -2607,9 +4100,12 @@ out_unlock:
2607 * Unreference resources outside of the cmdbuf_mutex to 4100 * Unreference resources outside of the cmdbuf_mutex to
2608 * avoid deadlocks in resource destruction paths. 4101 * avoid deadlocks in resource destruction paths.
2609 */ 4102 */
2610 vmw_resource_list_unreference(&resource_list); 4103 vmw_resource_list_unreference(sw_context, &resource_list);
2611 if (unlikely(error_resource != NULL)) 4104 if (unlikely(error_resource != NULL))
2612 vmw_resource_unreference(&error_resource); 4105 vmw_resource_unreference(&error_resource);
4106out_free_header:
4107 if (header)
4108 vmw_cmdbuf_header_free(header);
2613 4109
2614 return ret; 4110 return ret;
2615} 4111}
@@ -2628,9 +4124,11 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2628 DRM_ERROR("Can't unpin query buffer. Trying to recover.\n"); 4124 DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2629 4125
2630 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ); 4126 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2631 vmw_bo_pin(dev_priv->pinned_bo, false); 4127 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
2632 vmw_bo_pin(dev_priv->dummy_query_bo, false); 4128 if (dev_priv->dummy_query_bo_pinned) {
2633 dev_priv->dummy_query_bo_pinned = false; 4129 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4130 dev_priv->dummy_query_bo_pinned = false;
4131 }
2634} 4132}
2635 4133
2636 4134
@@ -2672,11 +4170,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2672 4170
2673 INIT_LIST_HEAD(&validate_list); 4171 INIT_LIST_HEAD(&validate_list);
2674 4172
2675 pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); 4173 pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
2676 pinned_val.shared = false; 4174 pinned_val.shared = false;
2677 list_add_tail(&pinned_val.head, &validate_list); 4175 list_add_tail(&pinned_val.head, &validate_list);
2678 4176
2679 query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo); 4177 query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
2680 query_val.shared = false; 4178 query_val.shared = false;
2681 list_add_tail(&query_val.head, &validate_list); 4179 list_add_tail(&query_val.head, &validate_list);
2682 4180
@@ -2697,10 +4195,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2697 dev_priv->query_cid_valid = false; 4195 dev_priv->query_cid_valid = false;
2698 } 4196 }
2699 4197
2700 vmw_bo_pin(dev_priv->pinned_bo, false); 4198 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
2701 vmw_bo_pin(dev_priv->dummy_query_bo, false); 4199 if (dev_priv->dummy_query_bo_pinned) {
2702 dev_priv->dummy_query_bo_pinned = false; 4200 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
2703 4201 dev_priv->dummy_query_bo_pinned = false;
4202 }
2704 if (fence == NULL) { 4203 if (fence == NULL) {
2705 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence, 4204 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2706 NULL); 4205 NULL);
@@ -2712,7 +4211,9 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2712 4211
2713 ttm_bo_unref(&query_val.bo); 4212 ttm_bo_unref(&query_val.bo);
2714 ttm_bo_unref(&pinned_val.bo); 4213 ttm_bo_unref(&pinned_val.bo);
2715 ttm_bo_unref(&dev_priv->pinned_bo); 4214 vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4215 DRM_INFO("Dummy query bo pin count: %d\n",
4216 dev_priv->dummy_query_bo->pin_count);
2716 4217
2717out_unlock: 4218out_unlock:
2718 return; 4219 return;
@@ -2722,7 +4223,7 @@ out_no_emit:
2722out_no_reserve: 4223out_no_reserve:
2723 ttm_bo_unref(&query_val.bo); 4224 ttm_bo_unref(&query_val.bo);
2724 ttm_bo_unref(&pinned_val.bo); 4225 ttm_bo_unref(&pinned_val.bo);
2725 ttm_bo_unref(&dev_priv->pinned_bo); 4226 vmw_dmabuf_unreference(&dev_priv->pinned_bo);
2726} 4227}
2727 4228
2728/** 4229/**
@@ -2751,36 +4252,68 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2751 mutex_unlock(&dev_priv->cmdbuf_mutex); 4252 mutex_unlock(&dev_priv->cmdbuf_mutex);
2752} 4253}
2753 4254
2754 4255int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
2755int vmw_execbuf_ioctl(struct drm_device *dev, void *data, 4256 struct drm_file *file_priv, size_t size)
2756 struct drm_file *file_priv)
2757{ 4257{
2758 struct vmw_private *dev_priv = vmw_priv(dev); 4258 struct vmw_private *dev_priv = vmw_priv(dev);
2759 struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; 4259 struct drm_vmw_execbuf_arg arg;
2760 int ret; 4260 int ret;
4261 static const size_t copy_offset[] = {
4262 offsetof(struct drm_vmw_execbuf_arg, context_handle),
4263 sizeof(struct drm_vmw_execbuf_arg)};
4264
4265 if (unlikely(size < copy_offset[0])) {
4266 DRM_ERROR("Invalid command size, ioctl %d\n",
4267 DRM_VMW_EXECBUF);
4268 return -EINVAL;
4269 }
4270
4271 if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4272 return -EFAULT;
2761 4273
2762 /* 4274 /*
2763 * This will allow us to extend the ioctl argument while 4275 * Extend the ioctl argument while
2764 * maintaining backwards compatibility: 4276 * maintaining backwards compatibility:
2765 * We take different code paths depending on the value of 4277 * We take different code paths depending on the value of
2766 * arg->version. 4278 * arg.version.
2767 */ 4279 */
2768 4280
2769 if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) { 4281 if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4282 arg.version == 0)) {
2770 DRM_ERROR("Incorrect execbuf version.\n"); 4283 DRM_ERROR("Incorrect execbuf version.\n");
2771 DRM_ERROR("You're running outdated experimental "
2772 "vmwgfx user-space drivers.");
2773 return -EINVAL; 4284 return -EINVAL;
2774 } 4285 }
2775 4286
4287 if (arg.version > 1 &&
4288 copy_from_user(&arg.context_handle,
4289 (void __user *) (data + copy_offset[0]),
4290 copy_offset[arg.version - 1] -
4291 copy_offset[0]) != 0)
4292 return -EFAULT;
4293
4294 switch (arg.version) {
4295 case 1:
4296 arg.context_handle = (uint32_t) -1;
4297 break;
4298 case 2:
4299 if (arg.pad64 != 0) {
4300 DRM_ERROR("Unused IOCTL data not set to zero.\n");
4301 return -EINVAL;
4302 }
4303 break;
4304 default:
4305 break;
4306 }
4307
2776 ret = ttm_read_lock(&dev_priv->reservation_sem, true); 4308 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2777 if (unlikely(ret != 0)) 4309 if (unlikely(ret != 0))
2778 return ret; 4310 return ret;
2779 4311
2780 ret = vmw_execbuf_process(file_priv, dev_priv, 4312 ret = vmw_execbuf_process(file_priv, dev_priv,
2781 (void __user *)(unsigned long)arg->commands, 4313 (void __user *)(unsigned long)arg.commands,
2782 NULL, arg->command_size, arg->throttle_us, 4314 NULL, arg.command_size, arg.throttle_us,
2783 (void __user *)(unsigned long)arg->fence_rep, 4315 arg.context_handle,
4316 (void __user *)(unsigned long)arg.fence_rep,
2784 NULL); 4317 NULL);
2785 ttm_read_unlock(&dev_priv->reservation_sem); 4318 ttm_read_unlock(&dev_priv->reservation_sem);
2786 if (unlikely(ret != 0)) 4319 if (unlikely(ret != 0))
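For orientation, a sketch (an assumption, not part of this patch) of how a
later argument revision would extend the two-stage copy above: each new
trailing member adds one entry to copy_offset, and the second copy_from_user()
then reads copy_offset[arg.version - 1] - copy_offset[0] bytes, so an older
user-space struct is never over-read. The member name new_v3_member below is
hypothetical:

	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		offsetof(struct drm_vmw_execbuf_arg, new_v3_member),
		sizeof(struct drm_vmw_execbuf_arg)
	};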
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 0a474f391fad..042c5b4c706c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -1,7 +1,7 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2007 David Airlie 3 * Copyright © 2007 David Airlie
4 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 4 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
5 * All Rights Reserved. 5 * All Rights Reserved.
6 * 6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a 7 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,7 @@
30 30
31#include <drm/drmP.h> 31#include <drm/drmP.h>
32#include "vmwgfx_drv.h" 32#include "vmwgfx_drv.h"
33#include "vmwgfx_kms.h"
33 34
34#include <drm/ttm/ttm_placement.h> 35#include <drm/ttm/ttm_placement.h>
35 36
@@ -40,21 +41,22 @@ struct vmw_fb_par {
40 41
41 void *vmalloc; 42 void *vmalloc;
42 43
44 struct mutex bo_mutex;
43 struct vmw_dma_buffer *vmw_bo; 45 struct vmw_dma_buffer *vmw_bo;
44 struct ttm_bo_kmap_obj map; 46 struct ttm_bo_kmap_obj map;
47 void *bo_ptr;
48 unsigned bo_size;
49 struct drm_framebuffer *set_fb;
50 struct drm_display_mode *set_mode;
51 u32 fb_x;
52 u32 fb_y;
53 bool bo_iowrite;
45 54
46 u32 pseudo_palette[17]; 55 u32 pseudo_palette[17];
47 56
48 unsigned depth;
49 unsigned bpp;
50
51 unsigned max_width; 57 unsigned max_width;
52 unsigned max_height; 58 unsigned max_height;
53 59
54 void *bo_ptr;
55 unsigned bo_size;
56 bool bo_iowrite;
57
58 struct { 60 struct {
59 spinlock_t lock; 61 spinlock_t lock;
60 bool active; 62 bool active;
@@ -63,6 +65,11 @@ struct vmw_fb_par {
63 unsigned x2; 65 unsigned x2;
64 unsigned y2; 66 unsigned y2;
65 } dirty; 67 } dirty;
68
69 struct drm_crtc *crtc;
70 struct drm_connector *con;
71
72 bool local_mode;
66}; 73};
67 74
68static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green, 75static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -77,7 +84,7 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
77 return 1; 84 return 1;
78 } 85 }
79 86
80 switch (par->depth) { 87 switch (par->set_fb->depth) {
81 case 24: 88 case 24:
82 case 32: 89 case 32:
83 pal[regno] = ((red & 0xff00) << 8) | 90 pal[regno] = ((red & 0xff00) << 8) |
@@ -85,7 +92,8 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
85 ((blue & 0xff00) >> 8); 92 ((blue & 0xff00) >> 8);
86 break; 93 break;
87 default: 94 default:
88 DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp); 95 DRM_ERROR("Bad depth %u, bpp %u.\n", par->set_fb->depth,
96 par->set_fb->bits_per_pixel);
89 return 1; 97 return 1;
90 } 98 }
91 99
@@ -134,12 +142,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
134 return -EINVAL; 142 return -EINVAL;
135 } 143 }
136 144
137 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
138 (var->xoffset != 0 || var->yoffset != 0)) {
139 DRM_ERROR("Can not handle panning without display topology\n");
140 return -EINVAL;
141 }
142
143 if ((var->xoffset + var->xres) > par->max_width || 145 if ((var->xoffset + var->xres) > par->max_width ||
144 (var->yoffset + var->yres) > par->max_height) { 146 (var->yoffset + var->yres) > par->max_height) {
145 DRM_ERROR("Requested geom can not fit in framebuffer\n"); 147 DRM_ERROR("Requested geom can not fit in framebuffer\n");
@@ -156,46 +158,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
156 return 0; 158 return 0;
157} 159}
158 160
159static int vmw_fb_set_par(struct fb_info *info)
160{
161 struct vmw_fb_par *par = info->par;
162 struct vmw_private *vmw_priv = par->vmw_priv;
163 int ret;
164
165 info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
166
167 ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
168 info->fix.line_length,
169 par->bpp, par->depth);
170 if (ret)
171 return ret;
172
173 if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
174 /* TODO check if pitch and offset changes */
175 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
176 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
177 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
178 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
179 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
180 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
181 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
182 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
183 }
184
185 /* This is really helpful since if this fails the user
186 * can probably not see anything on the screen.
187 */
188 WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
189
190 return 0;
191}
192
193static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
194 struct fb_info *info)
195{
196 return 0;
197}
198
199static int vmw_fb_blank(int blank, struct fb_info *info) 161static int vmw_fb_blank(int blank, struct fb_info *info)
200{ 162{
201 return 0; 163 return 0;
@@ -209,54 +171,77 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
209{ 171{
210 struct vmw_private *vmw_priv = par->vmw_priv; 172 struct vmw_private *vmw_priv = par->vmw_priv;
211 struct fb_info *info = vmw_priv->fb_info; 173 struct fb_info *info = vmw_priv->fb_info;
212 int stride = (info->fix.line_length / 4); 174 unsigned long irq_flags;
213 int *src = (int *)info->screen_base; 175 s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
214 __le32 __iomem *vram_mem = par->bo_ptr; 176 u32 cpp, max_x, max_y;
215 unsigned long flags; 177 struct drm_clip_rect clip;
216 unsigned x, y, w, h; 178 struct drm_framebuffer *cur_fb;
217 int i, k; 179 u8 *src_ptr, *dst_ptr;
218 struct {
219 uint32_t header;
220 SVGAFifoCmdUpdate body;
221 } *cmd;
222 180
223 if (vmw_priv->suspended) 181 if (vmw_priv->suspended)
224 return; 182 return;
225 183
226 spin_lock_irqsave(&par->dirty.lock, flags); 184 mutex_lock(&par->bo_mutex);
227 if (!par->dirty.active) { 185 cur_fb = par->set_fb;
228 spin_unlock_irqrestore(&par->dirty.lock, flags); 186 if (!cur_fb)
229 return; 187 goto out_unlock;
230 }
231 x = par->dirty.x1;
232 y = par->dirty.y1;
233 w = min(par->dirty.x2, info->var.xres) - x;
234 h = min(par->dirty.y2, info->var.yres) - y;
235 par->dirty.x1 = par->dirty.x2 = 0;
236 par->dirty.y1 = par->dirty.y2 = 0;
237 spin_unlock_irqrestore(&par->dirty.lock, flags);
238 188
239 for (i = y * stride; i < info->fix.smem_len / 4; i += stride) { 189 spin_lock_irqsave(&par->dirty.lock, irq_flags);
240 for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++) 190 if (!par->dirty.active) {
241 iowrite32(src[k], vram_mem + k); 191 spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
192 goto out_unlock;
242 } 193 }
243 194
244#if 0 195 /*
245 DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h); 196 * Handle panning when copying from vmalloc to framebuffer.
246#endif 197 * Clip dirty area to framebuffer.
198 */
199 cpp = (cur_fb->bits_per_pixel + 7) / 8;
200 max_x = par->fb_x + cur_fb->width;
201 max_y = par->fb_y + cur_fb->height;
202
203 dst_x1 = par->dirty.x1 - par->fb_x;
204 dst_y1 = par->dirty.y1 - par->fb_y;
205 dst_x1 = max_t(s32, dst_x1, 0);
206 dst_y1 = max_t(s32, dst_y1, 0);
207
208 dst_x2 = par->dirty.x2 - par->fb_x;
209 dst_y2 = par->dirty.y2 - par->fb_y;
210 dst_x2 = min_t(s32, dst_x2, max_x);
211 dst_y2 = min_t(s32, dst_y2, max_y);
212 w = dst_x2 - dst_x1;
213 h = dst_y2 - dst_y1;
214 w = max_t(s32, 0, w);
215 h = max_t(s32, 0, h);
247 216
248 cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd)); 217 par->dirty.x1 = par->dirty.x2 = 0;
249 if (unlikely(cmd == NULL)) { 218 par->dirty.y1 = par->dirty.y2 = 0;
250 DRM_ERROR("Fifo reserve failed.\n"); 219 spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
251 return; 220
221 if (w && h) {
222 dst_ptr = (u8 *)par->bo_ptr +
223 (dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
224 src_ptr = (u8 *)par->vmalloc +
225 ((dst_y1 + par->fb_y) * info->fix.line_length +
226 (dst_x1 + par->fb_x) * cpp);
227
228 while (h-- > 0) {
229 memcpy(dst_ptr, src_ptr, w*cpp);
230 dst_ptr += par->set_fb->pitches[0];
231 src_ptr += info->fix.line_length;
232 }
233
234 clip.x1 = dst_x1;
235 clip.x2 = dst_x2;
236 clip.y1 = dst_y1;
237 clip.y2 = dst_y2;
238
239 WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
240 &clip, 1));
241 vmw_fifo_flush(vmw_priv, false);
252 } 242 }
253 243out_unlock:
254 cmd->header = cpu_to_le32(SVGA_CMD_UPDATE); 244 mutex_unlock(&par->bo_mutex);
255 cmd->body.x = cpu_to_le32(x);
256 cmd->body.y = cpu_to_le32(y);
257 cmd->body.width = cpu_to_le32(w);
258 cmd->body.height = cpu_to_le32(h);
259 vmw_fifo_commit(vmw_priv, sizeof(*cmd));
260} 245}
261 246
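A worked instance of the clip arithmetic above, with illustrative numbers that
are not from the patch:

	/*
	 * fb_x = 100, fb_y = 0, a 640x480 set_fb at 32 bpp (cpp = 4),
	 * dirty rect (x1, y1)-(x2, y2) = (90, 10)-(200, 20):
	 *   dst_x1 = max(90 - 100, 0)          = 0
	 *   dst_y1 = max(10 - 0, 0)            = 10
	 *   dst_x2 = min(200 - 100, 100 + 640) = 100
	 *   dst_y2 = min(20 - 0, 0 + 480)      = 20
	 * so w = 100 and h = 10: ten memcpy() calls of 400 bytes each,
	 * advancing dst by set_fb->pitches[0] and src by
	 * info->fix.line_length per row.
	 */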
262static void vmw_fb_dirty_mark(struct vmw_fb_par *par, 247static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
@@ -291,6 +276,28 @@ static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
291 spin_unlock_irqrestore(&par->dirty.lock, flags); 276 spin_unlock_irqrestore(&par->dirty.lock, flags);
292} 277}
293 278
279static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
280 struct fb_info *info)
281{
282 struct vmw_fb_par *par = info->par;
283
284 if ((var->xoffset + var->xres) > var->xres_virtual ||
285 (var->yoffset + var->yres) > var->yres_virtual) {
286 DRM_ERROR("Requested panning can not fit in framebuffer\n");
287 return -EINVAL;
288 }
289
290 mutex_lock(&par->bo_mutex);
291 par->fb_x = var->xoffset;
292 par->fb_y = var->yoffset;
293 if (par->set_fb)
294 vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
295 par->set_fb->height);
296 mutex_unlock(&par->bo_mutex);
297
298 return 0;
299}
300
294static void vmw_deferred_io(struct fb_info *info, 301static void vmw_deferred_io(struct fb_info *info,
295 struct list_head *pagelist) 302 struct list_head *pagelist)
296{ 303{
@@ -324,7 +331,7 @@ static void vmw_deferred_io(struct fb_info *info,
324 vmw_fb_dirty_flush(par); 331 vmw_fb_dirty_flush(par);
325}; 332};
326 333
327struct fb_deferred_io vmw_defio = { 334static struct fb_deferred_io vmw_defio = {
328 .delay = VMW_DIRTY_DELAY, 335 .delay = VMW_DIRTY_DELAY,
329 .deferred_io = vmw_deferred_io, 336 .deferred_io = vmw_deferred_io,
330}; 337};
@@ -358,33 +365,12 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
358 * Bring up code 365 * Bring up code
359 */ 366 */
360 367
361static struct fb_ops vmw_fb_ops = {
362 .owner = THIS_MODULE,
363 .fb_check_var = vmw_fb_check_var,
364 .fb_set_par = vmw_fb_set_par,
365 .fb_setcolreg = vmw_fb_setcolreg,
366 .fb_fillrect = vmw_fb_fillrect,
367 .fb_copyarea = vmw_fb_copyarea,
368 .fb_imageblit = vmw_fb_imageblit,
369 .fb_pan_display = vmw_fb_pan_display,
370 .fb_blank = vmw_fb_blank,
371};
372
373static int vmw_fb_create_bo(struct vmw_private *vmw_priv, 368static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
374 size_t size, struct vmw_dma_buffer **out) 369 size_t size, struct vmw_dma_buffer **out)
375{ 370{
376 struct vmw_dma_buffer *vmw_bo; 371 struct vmw_dma_buffer *vmw_bo;
377 struct ttm_place ne_place = vmw_vram_ne_placement.placement[0];
378 struct ttm_placement ne_placement;
379 int ret; 372 int ret;
380 373
381 ne_placement.num_placement = 1;
382 ne_placement.placement = &ne_place;
383 ne_placement.num_busy_placement = 1;
384 ne_placement.busy_placement = &ne_place;
385
386 ne_place.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
387
388 (void) ttm_write_lock(&vmw_priv->reservation_sem, false); 374 (void) ttm_write_lock(&vmw_priv->reservation_sem, false);
389 375
390 vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL); 376 vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
@@ -394,31 +380,265 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
394 } 380 }
395 381
396 ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size, 382 ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
397 &ne_placement, 383 &vmw_sys_placement,
398 false, 384 false,
399 &vmw_dmabuf_bo_free); 385 &vmw_dmabuf_bo_free);
400 if (unlikely(ret != 0)) 386 if (unlikely(ret != 0))
401 goto err_unlock; /* init frees the buffer on failure */ 387 goto err_unlock; /* init frees the buffer on failure */
402 388
403 *out = vmw_bo; 389 *out = vmw_bo;
404 390 ttm_write_unlock(&vmw_priv->reservation_sem);
405 ttm_write_unlock(&vmw_priv->fbdev_master.lock);
406 391
407 return 0; 392 return 0;
408 393
409err_unlock: 394err_unlock:
410 ttm_write_unlock(&vmw_priv->fbdev_master.lock); 395 ttm_write_unlock(&vmw_priv->reservation_sem);
396 return ret;
397}
398
399static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
400 int *depth)
401{
402 switch (var->bits_per_pixel) {
403 case 32:
404 *depth = (var->transp.length > 0) ? 32 : 24;
405 break;
406 default:
407 DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
408 return -EINVAL;
409 }
410
411 return 0;
412}
413
414static int vmw_fb_kms_detach(struct vmw_fb_par *par,
415 bool detach_bo,
416 bool unref_bo)
417{
418 struct drm_framebuffer *cur_fb = par->set_fb;
419 int ret;
420
421 /* Detach the KMS framebuffer from crtcs */
422 if (par->set_mode) {
423 struct drm_mode_set set;
424
425 set.crtc = par->crtc;
426 set.x = 0;
427 set.y = 0;
428 set.mode = NULL;
429 set.fb = NULL;
430 set.num_connectors = 1;
431 set.connectors = &par->con;
432 ret = drm_mode_set_config_internal(&set);
433 if (ret) {
434 DRM_ERROR("Could not unset a mode.\n");
435 return ret;
436 }
437 drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
438 par->set_mode = NULL;
439 }
440
441 if (cur_fb) {
442 drm_framebuffer_unreference(cur_fb);
443 par->set_fb = NULL;
444 }
445
446 if (par->vmw_bo && detach_bo) {
447 if (par->bo_ptr) {
448 ttm_bo_kunmap(&par->map);
449 par->bo_ptr = NULL;
450 }
451 if (unref_bo)
452 vmw_dmabuf_unreference(&par->vmw_bo);
453 else
454 vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
455 }
456
457 return 0;
458}
459
460static int vmw_fb_kms_framebuffer(struct fb_info *info)
461{
462 struct drm_mode_fb_cmd mode_cmd;
463 struct vmw_fb_par *par = info->par;
464 struct fb_var_screeninfo *var = &info->var;
465 struct drm_framebuffer *cur_fb;
466 struct vmw_framebuffer *vfb;
467 int ret = 0;
468 size_t new_bo_size;
469
470 ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
471 if (ret)
472 return ret;
473
474 mode_cmd.width = var->xres;
475 mode_cmd.height = var->yres;
476 mode_cmd.bpp = var->bits_per_pixel;
477 mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;
478
479 cur_fb = par->set_fb;
480 if (cur_fb && cur_fb->width == mode_cmd.width &&
481 cur_fb->height == mode_cmd.height &&
482 cur_fb->bits_per_pixel == mode_cmd.bpp &&
483 cur_fb->depth == mode_cmd.depth &&
484 cur_fb->pitches[0] == mode_cmd.pitch)
485 return 0;
486
487 /* Need a new buffer object? Replace it if too small or over twice the needed size. */
488 new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height;
489 ret = vmw_fb_kms_detach(par,
490 par->bo_size < new_bo_size ||
491 par->bo_size > 2*new_bo_size,
492 true);
493 if (ret)
494 return ret;
495
496 if (!par->vmw_bo) {
497 ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
498 &par->vmw_bo);
499 if (ret) {
500 DRM_ERROR("Failed creating a buffer object for "
501 "fbdev.\n");
502 return ret;
503 }
504 par->bo_size = new_bo_size;
505 }
506
507 vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
508 true, &mode_cmd);
509 if (IS_ERR(vfb))
510 return PTR_ERR(vfb);
511
512 par->set_fb = &vfb->base;
513
514 if (!par->bo_ptr) {
515 /*
516 * Pin before mapping. Since we don't know in what placement
517 * to pin, call into KMS to do it for us.
518 */
519 ret = vfb->pin(vfb);
520 if (ret) {
521 DRM_ERROR("Could not pin the fbdev framebuffer.\n");
522 return ret;
523 }
524
525 ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
526 par->vmw_bo->base.num_pages, &par->map);
527 if (ret) {
528 vfb->unpin(vfb);
529 DRM_ERROR("Could not map the fbdev framebuffer.\n");
530 return ret;
531 }
532
533 par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
534 }
535
536 return 0;
537}
538
539static int vmw_fb_set_par(struct fb_info *info)
540{
541 struct vmw_fb_par *par = info->par;
542 struct vmw_private *vmw_priv = par->vmw_priv;
543 struct drm_mode_set set;
544 struct fb_var_screeninfo *var = &info->var;
545 struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
546 DRM_MODE_TYPE_DRIVER,
547 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
548 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
549 };
550 struct drm_display_mode *old_mode;
551 struct drm_display_mode *mode;
552 int ret;
553
554 old_mode = par->set_mode;
555 mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
556 if (!mode) {
557 DRM_ERROR("Could not create new fb mode.\n");
558 return -ENOMEM;
559 }
560
561 mode->hdisplay = var->xres;
562 mode->vdisplay = var->yres;
563 vmw_guess_mode_timing(mode);
564
565 if (old_mode && drm_mode_equal(old_mode, mode)) {
566 drm_mode_destroy(vmw_priv->dev, mode);
567 mode = old_mode;
568 old_mode = NULL;
569 } else if (!vmw_kms_validate_mode_vram(vmw_priv,
570 mode->hdisplay *
571 (var->bits_per_pixel + 7) / 8,
572 mode->vdisplay)) {
573 drm_mode_destroy(vmw_priv->dev, mode);
574 return -EINVAL;
575 }
576
577 mutex_lock(&par->bo_mutex);
578 drm_modeset_lock_all(vmw_priv->dev);
579 ret = vmw_fb_kms_framebuffer(info);
580 if (ret)
581 goto out_unlock;
582
583 par->fb_x = var->xoffset;
584 par->fb_y = var->yoffset;
585
586 set.crtc = par->crtc;
587 set.x = 0;
588 set.y = 0;
589 set.mode = mode;
590 set.fb = par->set_fb;
591 set.num_connectors = 1;
592 set.connectors = &par->con;
593
594 ret = drm_mode_set_config_internal(&set);
595 if (ret)
596 goto out_unlock;
597
598 vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
599 par->set_fb->width, par->set_fb->height);
600
601 /* If there already was stuff dirty we won't
602 * schedule new work, so let's do it now */
603
604#if (defined(VMWGFX_STANDALONE) && defined(VMWGFX_FB_DEFERRED))
605 schedule_delayed_work(&par->def_par.deferred_work, 0);
606#else
607 schedule_delayed_work(&info->deferred_work, 0);
608#endif
609
610out_unlock:
611 if (old_mode)
612 drm_mode_destroy(vmw_priv->dev, old_mode);
613 par->set_mode = mode;
614
615 drm_modeset_unlock_all(vmw_priv->dev);
616 mutex_unlock(&par->bo_mutex);
617
411 return ret; 618 return ret;
412} 619}
413 620
621
622static struct fb_ops vmw_fb_ops = {
623 .owner = THIS_MODULE,
624 .fb_check_var = vmw_fb_check_var,
625 .fb_set_par = vmw_fb_set_par,
626 .fb_setcolreg = vmw_fb_setcolreg,
627 .fb_fillrect = vmw_fb_fillrect,
628 .fb_copyarea = vmw_fb_copyarea,
629 .fb_imageblit = vmw_fb_imageblit,
630 .fb_pan_display = vmw_fb_pan_display,
631 .fb_blank = vmw_fb_blank,
632};
633
414int vmw_fb_init(struct vmw_private *vmw_priv) 634int vmw_fb_init(struct vmw_private *vmw_priv)
415{ 635{
416 struct device *device = &vmw_priv->dev->pdev->dev; 636 struct device *device = &vmw_priv->dev->pdev->dev;
417 struct vmw_fb_par *par; 637 struct vmw_fb_par *par;
418 struct fb_info *info; 638 struct fb_info *info;
419 unsigned initial_width, initial_height;
420 unsigned fb_width, fb_height; 639 unsigned fb_width, fb_height;
421 unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size; 640 unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
641 struct drm_display_mode *init_mode;
422 int ret; 642 int ret;
423 643
424 fb_bpp = 32; 644 fb_bpp = 32;
@@ -428,9 +648,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
428 fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); 648 fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
429 fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); 649 fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
430 650
431 initial_width = min(vmw_priv->initial_width, fb_width);
432 initial_height = min(vmw_priv->initial_height, fb_height);
433
434 fb_pitch = fb_width * fb_bpp / 8; 651 fb_pitch = fb_width * fb_bpp / 8;
435 fb_size = fb_pitch * fb_height; 652 fb_size = fb_pitch * fb_height;
436 fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET); 653 fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
@@ -444,35 +661,34 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
444 */ 661 */
445 vmw_priv->fb_info = info; 662 vmw_priv->fb_info = info;
446 par = info->par; 663 par = info->par;
664 memset(par, 0, sizeof(*par));
447 par->vmw_priv = vmw_priv; 665 par->vmw_priv = vmw_priv;
448 par->depth = fb_depth;
449 par->bpp = fb_bpp;
450 par->vmalloc = NULL; 666 par->vmalloc = NULL;
451 par->max_width = fb_width; 667 par->max_width = fb_width;
452 par->max_height = fb_height; 668 par->max_height = fb_height;
453 669
670 drm_modeset_lock_all(vmw_priv->dev);
671 ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
672 par->max_height, &par->con,
673 &par->crtc, &init_mode);
674 if (ret) {
675 drm_modeset_unlock_all(vmw_priv->dev);
676 goto err_kms;
677 }
678
679 info->var.xres = init_mode->hdisplay;
680 info->var.yres = init_mode->vdisplay;
681 drm_modeset_unlock_all(vmw_priv->dev);
682
454 /* 683 /*
455 * Create buffers and alloc memory 684 * Create buffers and alloc memory
456 */ 685 */
457 par->vmalloc = vmalloc(fb_size); 686 par->vmalloc = vzalloc(fb_size);
458 if (unlikely(par->vmalloc == NULL)) { 687 if (unlikely(par->vmalloc == NULL)) {
459 ret = -ENOMEM; 688 ret = -ENOMEM;
460 goto err_free; 689 goto err_free;
461 } 690 }
462 691
463 ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
464 if (unlikely(ret != 0))
465 goto err_free;
466
467 ret = ttm_bo_kmap(&par->vmw_bo->base,
468 0,
469 par->vmw_bo->base.num_pages,
470 &par->map);
471 if (unlikely(ret != 0))
472 goto err_unref;
473 par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
474 par->bo_size = fb_size;
475
476 /* 692 /*
477 * Fixed and var 693 * Fixed and var
478 */ 694 */
@@ -490,7 +706,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
490 info->fix.smem_len = fb_size; 706 info->fix.smem_len = fb_size;
491 707
492 info->pseudo_palette = par->pseudo_palette; 708 info->pseudo_palette = par->pseudo_palette;
493 info->screen_base = par->vmalloc; 709 info->screen_base = (char __iomem *)par->vmalloc;
494 info->screen_size = fb_size; 710 info->screen_size = fb_size;
495 711
496 info->flags = FBINFO_DEFAULT; 712 info->flags = FBINFO_DEFAULT;
@@ -508,18 +724,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
508 724
509 info->var.xres_virtual = fb_width; 725 info->var.xres_virtual = fb_width;
510 info->var.yres_virtual = fb_height; 726 info->var.yres_virtual = fb_height;
511 info->var.bits_per_pixel = par->bpp; 727 info->var.bits_per_pixel = fb_bpp;
512 info->var.xoffset = 0; 728 info->var.xoffset = 0;
513 info->var.yoffset = 0; 729 info->var.yoffset = 0;
514 info->var.activate = FB_ACTIVATE_NOW; 730 info->var.activate = FB_ACTIVATE_NOW;
515 info->var.height = -1; 731 info->var.height = -1;
516 info->var.width = -1; 732 info->var.width = -1;
517 733
518 info->var.xres = initial_width;
519 info->var.yres = initial_height;
520
521 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 734 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
522
523 info->apertures = alloc_apertures(1); 735 info->apertures = alloc_apertures(1);
524 if (!info->apertures) { 736 if (!info->apertures) {
525 ret = -ENOMEM; 737 ret = -ENOMEM;
@@ -535,6 +747,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
535 par->dirty.y1 = par->dirty.y2 = 0; 747 par->dirty.y1 = par->dirty.y2 = 0;
536 par->dirty.active = true; 748 par->dirty.active = true;
537 spin_lock_init(&par->dirty.lock); 749 spin_lock_init(&par->dirty.lock);
750 mutex_init(&par->bo_mutex);
538 info->fbdefio = &vmw_defio; 751 info->fbdefio = &vmw_defio;
539 fb_deferred_io_init(info); 752 fb_deferred_io_init(info);
540 753
@@ -542,16 +755,16 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
542 if (unlikely(ret != 0)) 755 if (unlikely(ret != 0))
543 goto err_defio; 756 goto err_defio;
544 757
758 vmw_fb_set_par(info);
759
545 return 0; 760 return 0;
546 761
547err_defio: 762err_defio:
548 fb_deferred_io_cleanup(info); 763 fb_deferred_io_cleanup(info);
549err_aper: 764err_aper:
550 ttm_bo_kunmap(&par->map);
551err_unref:
552 ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
553err_free: 765err_free:
554 vfree(par->vmalloc); 766 vfree(par->vmalloc);
767err_kms:
555 framebuffer_release(info); 768 framebuffer_release(info);
556 vmw_priv->fb_info = NULL; 769 vmw_priv->fb_info = NULL;
557 770
@@ -562,22 +775,18 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
562{ 775{
563 struct fb_info *info; 776 struct fb_info *info;
564 struct vmw_fb_par *par; 777 struct vmw_fb_par *par;
565 struct ttm_buffer_object *bo;
566 778
567 if (!vmw_priv->fb_info) 779 if (!vmw_priv->fb_info)
568 return 0; 780 return 0;
569 781
570 info = vmw_priv->fb_info; 782 info = vmw_priv->fb_info;
571 par = info->par; 783 par = info->par;
572 bo = &par->vmw_bo->base;
573 par->vmw_bo = NULL;
574 784
575 /* ??? order */ 785 /* ??? order */
576 fb_deferred_io_cleanup(info); 786 fb_deferred_io_cleanup(info);
577 unregister_framebuffer(info); 787 unregister_framebuffer(info);
578 788
579 ttm_bo_kunmap(&par->map); 789 (void) vmw_fb_kms_detach(par, true, true);
580 ttm_bo_unref(&bo);
581 790
582 vfree(par->vmalloc); 791 vfree(par->vmalloc);
583 framebuffer_release(info); 792 framebuffer_release(info);
@@ -603,10 +812,9 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
603 812
604 flush_delayed_work(&info->deferred_work); 813 flush_delayed_work(&info->deferred_work);
605 814
606 par->bo_ptr = NULL; 815 mutex_lock(&par->bo_mutex);
607 ttm_bo_kunmap(&par->map); 816 (void) vmw_fb_kms_detach(par, true, false);
608 817 mutex_unlock(&par->bo_mutex);
609 vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);
610 818
611 return 0; 819 return 0;
612} 820}
@@ -616,8 +824,6 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
616 struct fb_info *info; 824 struct fb_info *info;
617 struct vmw_fb_par *par; 825 struct vmw_fb_par *par;
618 unsigned long flags; 826 unsigned long flags;
619 bool dummy;
620 int ret;
621 827
622 if (!vmw_priv->fb_info) 828 if (!vmw_priv->fb_info)
623 return -EINVAL; 829 return -EINVAL;
@@ -625,38 +831,10 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
625 info = vmw_priv->fb_info; 831 info = vmw_priv->fb_info;
626 par = info->par; 832 par = info->par;
627 833
628 /* we are already active */ 834 vmw_fb_set_par(info);
629 if (par->bo_ptr != NULL)
630 return 0;
631
632 /* Make sure that all overlays are stoped when we take over */
633 vmw_overlay_stop_all(vmw_priv);
634
635 ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
636 if (unlikely(ret != 0)) {
637 DRM_ERROR("could not move buffer to start of VRAM\n");
638 goto err_no_buffer;
639 }
640
641 ret = ttm_bo_kmap(&par->vmw_bo->base,
642 0,
643 par->vmw_bo->base.num_pages,
644 &par->map);
645 BUG_ON(ret != 0);
646 par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
647
648 spin_lock_irqsave(&par->dirty.lock, flags); 835 spin_lock_irqsave(&par->dirty.lock, flags);
649 par->dirty.active = true; 836 par->dirty.active = true;
650 spin_unlock_irqrestore(&par->dirty.lock, flags); 837 spin_unlock_irqrestore(&par->dirty.lock, flags);
651 838
652err_no_buffer:
653 vmw_fb_set_par(info);
654
655 vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
656
657 /* If there already was stuff dirty we wont
658 * schedule a new work, so lets do it now */
659 schedule_delayed_work(&info->deferred_work, 0);
660
661 return 0; 839 return 0;
662} 840}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 945f1e0dad92..567ddede51d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -142,7 +142,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
142 struct vmw_fence_manager *fman = fman_from_fence(fence); 142 struct vmw_fence_manager *fman = fman_from_fence(fence);
143 struct vmw_private *dev_priv = fman->dev_priv; 143 struct vmw_private *dev_priv = fman->dev_priv;
144 144
145 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 145 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
146 u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); 146 u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
147 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) 147 if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
148 return false; 148 return false;
@@ -386,7 +386,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
386 u32 passed_seqno) 386 u32 passed_seqno)
387{ 387{
388 u32 goal_seqno; 388 u32 goal_seqno;
389 __le32 __iomem *fifo_mem; 389 u32 __iomem *fifo_mem;
390 struct vmw_fence_obj *fence; 390 struct vmw_fence_obj *fence;
391 391
392 if (likely(!fman->seqno_valid)) 392 if (likely(!fman->seqno_valid))
@@ -430,7 +430,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
430{ 430{
431 struct vmw_fence_manager *fman = fman_from_fence(fence); 431 struct vmw_fence_manager *fman = fman_from_fence(fence);
432 u32 goal_seqno; 432 u32 goal_seqno;
433 __le32 __iomem *fifo_mem; 433 u32 __iomem *fifo_mem;
434 434
435 if (fence_is_signaled_locked(&fence->base)) 435 if (fence_is_signaled_locked(&fence->base))
436 return false; 436 return false;
@@ -453,7 +453,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
453 struct list_head action_list; 453 struct list_head action_list;
454 bool needs_rerun; 454 bool needs_rerun;
455 uint32_t seqno, new_seqno; 455 uint32_t seqno, new_seqno;
456 __le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt; 456 u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
457 457
458 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); 458 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
459rerun: 459rerun:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 26a4add39208..8be6c29f5eb5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2011-2012 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39f2b03888e7..80c40c31d4f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,9 +29,14 @@
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30#include <drm/ttm/ttm_placement.h> 30#include <drm/ttm/ttm_placement.h>
31 31
32struct vmw_temp_set_context {
33 SVGA3dCmdHeader header;
34 SVGA3dCmdDXTempSetContext body;
35};
36
32bool vmw_fifo_have_3d(struct vmw_private *dev_priv) 37bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
33{ 38{
34 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 39 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
35 uint32_t fifo_min, hwversion; 40 uint32_t fifo_min, hwversion;
36 const struct vmw_fifo_state *fifo = &dev_priv->fifo; 41 const struct vmw_fifo_state *fifo = &dev_priv->fifo;
37 42
@@ -71,8 +76,8 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
71 if (hwversion < SVGA3D_HWVERSION_WS8_B1) 76 if (hwversion < SVGA3D_HWVERSION_WS8_B1)
72 return false; 77 return false;
73 78
74 /* Non-Screen Object path does not support surfaces */ 79 /* Legacy Display Unit does not support surfaces */
75 if (!dev_priv->sou_priv) 80 if (dev_priv->active_display_unit == vmw_du_legacy)
76 return false; 81 return false;
77 82
78 return true; 83 return true;
@@ -80,7 +85,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
80 85
81bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv) 86bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
82{ 87{
83 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 88 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
84 uint32_t caps; 89 uint32_t caps;
85 90
86 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) 91 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
@@ -95,11 +100,11 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
95 100
96int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) 101int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
97{ 102{
98 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 103 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
99 uint32_t max; 104 uint32_t max;
100 uint32_t min; 105 uint32_t min;
101 uint32_t dummy;
102 106
107 fifo->dx = false;
103 fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE; 108 fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
104 fifo->static_buffer = vmalloc(fifo->static_buffer_size); 109 fifo->static_buffer = vmalloc(fifo->static_buffer_size);
105 if (unlikely(fifo->static_buffer == NULL)) 110 if (unlikely(fifo->static_buffer == NULL))
@@ -112,10 +117,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
112 mutex_init(&fifo->fifo_mutex); 117 mutex_init(&fifo->fifo_mutex);
113 init_rwsem(&fifo->rwsem); 118 init_rwsem(&fifo->rwsem);
114 119
115 /*
116 * Allow mapping the first page read-only to user-space.
117 */
118
119 DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH)); 120 DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
120 DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); 121 DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
121 DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); 122 DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
@@ -123,7 +124,10 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
123 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 124 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
124 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 125 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
125 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); 126 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
126 vmw_write(dev_priv, SVGA_REG_ENABLE, 1); 127
128 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
129 SVGA_REG_ENABLE_HIDE);
130 vmw_write(dev_priv, SVGA_REG_TRACES, 0);
127 131
128 min = 4; 132 min = 4;
129 if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO) 133 if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
@@ -155,12 +159,13 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
155 atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno); 159 atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
156 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE); 160 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
157 vmw_marker_queue_init(&fifo->marker_queue); 161 vmw_marker_queue_init(&fifo->marker_queue);
158 return vmw_fifo_send_fence(dev_priv, &dummy); 162
163 return 0;
159} 164}
160 165
161void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) 166void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
162{ 167{
163 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 168 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
164 static DEFINE_SPINLOCK(ping_lock); 169 static DEFINE_SPINLOCK(ping_lock);
165 unsigned long irq_flags; 170 unsigned long irq_flags;
166 171
@@ -178,7 +183,7 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
178 183
179void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) 184void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
180{ 185{
181 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 186 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
182 187
183 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); 188 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
184 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) 189 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
@@ -208,7 +213,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
208 213
209static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes) 214static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
210{ 215{
211 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 216 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
212 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX); 217 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
213 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); 218 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
214 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); 219 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -312,10 +317,11 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
312 * Returns: 317 * Returns:
313 * Pointer to the fifo, or null on error (possible hardware hang). 318 * Pointer to the fifo, or null on error (possible hardware hang).
314 */ 319 */
315void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) 320static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
321 uint32_t bytes)
316{ 322{
317 struct vmw_fifo_state *fifo_state = &dev_priv->fifo; 323 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
318 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 324 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
319 uint32_t max; 325 uint32_t max;
320 uint32_t min; 326 uint32_t min;
321 uint32_t next_cmd; 327 uint32_t next_cmd;
@@ -372,7 +378,8 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
372 if (reserveable) 378 if (reserveable)
373 iowrite32(bytes, fifo_mem + 379 iowrite32(bytes, fifo_mem +
374 SVGA_FIFO_RESERVED); 380 SVGA_FIFO_RESERVED);
375 return fifo_mem + (next_cmd >> 2); 381 return (void __force *) (fifo_mem +
382 (next_cmd >> 2));
376 } else { 383 } else {
377 need_bounce = true; 384 need_bounce = true;
378 } 385 }
@@ -391,11 +398,36 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
391out_err: 398out_err:
392 fifo_state->reserved_size = 0; 399 fifo_state->reserved_size = 0;
393 mutex_unlock(&fifo_state->fifo_mutex); 400 mutex_unlock(&fifo_state->fifo_mutex);
401
394 return NULL; 402 return NULL;
395} 403}
396 404
405void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
406 int ctx_id)
407{
408 void *ret;
409
410 if (dev_priv->cman)
411 ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
412 ctx_id, false, NULL);
413 else if (ctx_id == SVGA3D_INVALID_ID)
414 ret = vmw_local_fifo_reserve(dev_priv, bytes);
415 else {
416 WARN_ON("Command buffer has not been allocated.\n");
417 ret = NULL;
418 }
419 if (IS_ERR_OR_NULL(ret)) {
420 DRM_ERROR("Fifo reserve failure of %u bytes.\n",
421 (unsigned) bytes);
422 dump_stack();
423 return NULL;
424 }
425
426 return ret;
427}
428
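A sketch of the reserve/commit pairing the new helper expects, assumed from
how vmw_fifo_reserve()/vmw_fifo_commit() are paired elsewhere in this series;
ctx_id and the choice of command struct are placeholders:

	struct vmw_temp_set_context *cmd;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_id);
	if (!cmd)	/* vmw_fifo_reserve_dx() already logged the failure */
		return -ENOMEM;
	/* ... fill in cmd->header and cmd->body here ... */
	vmw_fifo_commit(dev_priv, sizeof(*cmd));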
397static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state, 429static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
398 __le32 __iomem *fifo_mem, 430 u32 __iomem *fifo_mem,
399 uint32_t next_cmd, 431 uint32_t next_cmd,
400 uint32_t max, uint32_t min, uint32_t bytes) 432 uint32_t max, uint32_t min, uint32_t bytes)
401{ 433{
@@ -417,7 +449,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
417} 449}
418 450
419static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state, 451static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
420 __le32 __iomem *fifo_mem, 452 u32 __iomem *fifo_mem,
421 uint32_t next_cmd, 453 uint32_t next_cmd,
422 uint32_t max, uint32_t min, uint32_t bytes) 454 uint32_t max, uint32_t min, uint32_t bytes)
423{ 455{
@@ -436,15 +468,19 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
436 } 468 }
437} 469}
438 470
439void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) 471static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
440{ 472{
441 struct vmw_fifo_state *fifo_state = &dev_priv->fifo; 473 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
442 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 474 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
443 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); 475 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
444 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX); 476 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
445 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); 477 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
446 bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; 478 bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
447 479
480 if (fifo_state->dx)
481 bytes += sizeof(struct vmw_temp_set_context);
482
483 fifo_state->dx = false;
448 BUG_ON((bytes & 3) != 0); 484 BUG_ON((bytes & 3) != 0);
449 BUG_ON(bytes > fifo_state->reserved_size); 485 BUG_ON(bytes > fifo_state->reserved_size);
450 486
@@ -482,13 +518,53 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
482 mutex_unlock(&fifo_state->fifo_mutex); 518 mutex_unlock(&fifo_state->fifo_mutex);
483} 519}
484 520
521void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
522{
523 if (dev_priv->cman)
524 vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
525 else
526 vmw_local_fifo_commit(dev_priv, bytes);
527}
528
529
530/**
531 * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
532 *
533 * @dev_priv: Pointer to device private structure.
534 * @bytes: Number of bytes to commit.
535 */
536void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
537{
538 if (dev_priv->cman)
539 vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
540 else
541 vmw_local_fifo_commit(dev_priv, bytes);
542}
543
544/**
545 * vmw_fifo_flush - Flush any buffered commands and make sure command processing
546 * starts.
547 *
548 * @dev_priv: Pointer to device private structure.
 549 * @interruptible: Whether to wait interruptibly if the function needs to sleep.
550 */
551int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
552{
553 might_sleep();
554
555 if (dev_priv->cman)
556 return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
557 else
558 return 0;
559}
560
485int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) 561int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
486{ 562{
487 struct vmw_fifo_state *fifo_state = &dev_priv->fifo; 563 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
488 struct svga_fifo_cmd_fence *cmd_fence; 564 struct svga_fifo_cmd_fence *cmd_fence;
489 void *fm; 565 u32 *fm;
490 int ret = 0; 566 int ret = 0;
491 uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence); 567 uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
492 568
493 fm = vmw_fifo_reserve(dev_priv, bytes); 569 fm = vmw_fifo_reserve(dev_priv, bytes);
494 if (unlikely(fm == NULL)) { 570 if (unlikely(fm == NULL)) {
@@ -514,12 +590,10 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
514 return 0; 590 return 0;
515 } 591 }
516 592
517 *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE); 593 *fm++ = SVGA_CMD_FENCE;
518 cmd_fence = (struct svga_fifo_cmd_fence *) 594 cmd_fence = (struct svga_fifo_cmd_fence *) fm;
519 ((unsigned long)fm + sizeof(__le32)); 595 cmd_fence->fence = *seqno;
520 596 vmw_fifo_commit_flush(dev_priv, bytes);
521 iowrite32(*seqno, &cmd_fence->fence);
522 vmw_fifo_commit(dev_priv, bytes);
523 (void) vmw_marker_push(&fifo_state->marker_queue, *seqno); 597 (void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
524 vmw_update_seqno(dev_priv, fifo_state); 598 vmw_update_seqno(dev_priv, fifo_state);
525 599
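The fence path above is the motivating user of vmw_fifo_commit_flush(): with a command buffer manager present, a plain commit may leave the command buffered, while the flushing variant also kicks processing before the caller sleeps on the result. A hedged sketch of the rule of thumb (example_commit is illustrative only):

	static void example_commit(struct vmw_private *dev_priv,
				   uint32_t bytes, bool will_wait)
	{
		if (will_wait)
			/* make sure the device starts on it before we sleep */
			vmw_fifo_commit_flush(dev_priv, bytes);
		else
			/* bulk submission; vmw_fifo_flush() can follow later */
			vmw_fifo_commit(dev_priv, bytes);
	}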
@@ -545,7 +619,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
545 * without writing to the query result structure. 619 * without writing to the query result structure.
546 */ 620 */
547 621
548 struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; 622 struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
549 struct { 623 struct {
550 SVGA3dCmdHeader header; 624 SVGA3dCmdHeader header;
551 SVGA3dCmdWaitForQuery body; 625 SVGA3dCmdWaitForQuery body;
@@ -594,7 +668,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
594 * without writing to the query result structure. 668 * without writing to the query result structure.
595 */ 669 */
596 670
597 struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; 671 struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
598 struct { 672 struct {
599 SVGA3dCmdHeader header; 673 SVGA3dCmdHeader header;
600 SVGA3dCmdWaitForGBQuery body; 674 SVGA3dCmdWaitForGBQuery body;
@@ -647,3 +721,8 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
647 721
648 return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid); 722 return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
649} 723}
724
725void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
726{
727 return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
728}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 61d8d803199f..66ffa1d4759c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 69c8ce23123c..0a970afed93b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,6 +28,7 @@
28#include "vmwgfx_drv.h" 28#include "vmwgfx_drv.h"
29#include <drm/vmwgfx_drm.h> 29#include <drm/vmwgfx_drm.h>
30#include "vmwgfx_kms.h" 30#include "vmwgfx_kms.h"
31#include "device_include/svga3d_caps.h"
31 32
32struct svga_3d_compat_cap { 33struct svga_3d_compat_cap {
33 SVGA3dCapsRecordHeader header; 34 SVGA3dCapsRecordHeader header;
@@ -63,7 +64,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
63 break; 64 break;
64 case DRM_VMW_PARAM_FIFO_HW_VERSION: 65 case DRM_VMW_PARAM_FIFO_HW_VERSION:
65 { 66 {
66 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 67 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
67 const struct vmw_fifo_state *fifo = &dev_priv->fifo; 68 const struct vmw_fifo_state *fifo = &dev_priv->fifo;
68 69
69 if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) { 70 if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -105,6 +106,13 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
105 case DRM_VMW_PARAM_MAX_MOB_SIZE: 106 case DRM_VMW_PARAM_MAX_MOB_SIZE:
106 param->value = dev_priv->max_mob_size; 107 param->value = dev_priv->max_mob_size;
107 break; 108 break;
109 case DRM_VMW_PARAM_SCREEN_TARGET:
110 param->value =
111 (dev_priv->active_display_unit == vmw_du_screen_target);
112 break;
113 case DRM_VMW_PARAM_DX:
114 param->value = dev_priv->has_dx;
115 break;
108 default: 116 default:
109 DRM_ERROR("Illegal vmwgfx get param request: %d\n", 117 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
110 param->param); 118 param->param);
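The two parameters added above exist for user-space feature detection. A hedged sketch of such a query, assuming libdrm's drmCommandWriteRead() and the vmwgfx uapi header (the header path and function name vmw_has_dx are illustrative, not part of the patch):

	#include <xf86drm.h>
	#include <drm/vmwgfx_drm.h>	/* install path may differ */

	static int vmw_has_dx(int fd)
	{
		struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_DX };

		/* older kernels reject unknown params; treat that as "no DX" */
		if (drmCommandWriteRead(fd, DRM_VMW_GETPARAM,
					&arg, sizeof(arg)) != 0)
			return 0;

		return arg.value != 0;
	}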
@@ -154,7 +162,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
154 (struct drm_vmw_get_3d_cap_arg *) data; 162 (struct drm_vmw_get_3d_cap_arg *) data;
155 struct vmw_private *dev_priv = vmw_priv(dev); 163 struct vmw_private *dev_priv = vmw_priv(dev);
156 uint32_t size; 164 uint32_t size;
157 __le32 __iomem *fifo_mem; 165 u32 __iomem *fifo_mem;
158 void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); 166 void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
159 void *bounce; 167 void *bounce;
160 int ret; 168 int ret;
@@ -235,7 +243,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
235 int ret; 243 int ret;
236 244
237 num_clips = arg->num_clips; 245 num_clips = arg->num_clips;
238 clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr; 246 clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
239 247
240 if (unlikely(num_clips == 0)) 248 if (unlikely(num_clips == 0))
241 return 0; 249 return 0;
@@ -318,7 +326,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
318 int ret; 326 int ret;
319 327
320 num_clips = arg->num_clips; 328 num_clips = arg->num_clips;
321 clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr; 329 clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
322 330
323 if (unlikely(num_clips == 0)) 331 if (unlikely(num_clips == 0))
324 return 0; 332 return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 9fe9827ee499..9498a5e33c12 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -56,6 +56,9 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
56 if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS) 56 if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
57 wake_up_all(&dev_priv->fifo_queue); 57 wake_up_all(&dev_priv->fifo_queue);
58 58
59 if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
60 SVGA_IRQFLAG_ERROR))
61 vmw_cmdbuf_tasklet_schedule(dev_priv->cman);
59 62
60 return IRQ_HANDLED; 63 return IRQ_HANDLED;
61} 64}
@@ -69,7 +72,7 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
69void vmw_update_seqno(struct vmw_private *dev_priv, 72void vmw_update_seqno(struct vmw_private *dev_priv,
70 struct vmw_fifo_state *fifo_state) 73 struct vmw_fifo_state *fifo_state)
71{ 74{
72 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 75 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
73 uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); 76 uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
74 77
75 if (dev_priv->last_read_seqno != seqno) { 78 if (dev_priv->last_read_seqno != seqno) {
@@ -131,8 +134,16 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
131 * Block command submission while waiting for idle. 134 * Block command submission while waiting for idle.
132 */ 135 */
133 136
134 if (fifo_idle) 137 if (fifo_idle) {
135 down_read(&fifo_state->rwsem); 138 down_read(&fifo_state->rwsem);
139 if (dev_priv->cman) {
140 ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
141 10*HZ);
142 if (ret)
143 goto out_err;
144 }
145 }
146
136 signal_seq = atomic_read(&dev_priv->marker_seq); 147 signal_seq = atomic_read(&dev_priv->marker_seq);
137 ret = 0; 148 ret = 0;
138 149
@@ -167,10 +178,11 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
167 } 178 }
168 finish_wait(&dev_priv->fence_queue, &__wait); 179 finish_wait(&dev_priv->fence_queue, &__wait);
169 if (ret == 0 && fifo_idle) { 180 if (ret == 0 && fifo_idle) {
170 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 181 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
171 iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE); 182 iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
172 } 183 }
173 wake_up_all(&dev_priv->fence_queue); 184 wake_up_all(&dev_priv->fence_queue);
185out_err:
174 if (fifo_idle) 186 if (fifo_idle)
175 up_read(&fifo_state->rwsem); 187 up_read(&fifo_state->rwsem);
176 188
@@ -315,3 +327,30 @@ void vmw_irq_uninstall(struct drm_device *dev)
315 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 327 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
316 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 328 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
317} 329}
330
331void vmw_generic_waiter_add(struct vmw_private *dev_priv,
332 u32 flag, int *waiter_count)
333{
334 unsigned long irq_flags;
335
336 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
337 if ((*waiter_count)++ == 0) {
338 outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
339 dev_priv->irq_mask |= flag;
340 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
341 }
342 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
343}
344
345void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
346 u32 flag, int *waiter_count)
347{
348 unsigned long irq_flags;
349
350 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
351 if (--(*waiter_count) == 0) {
352 dev_priv->irq_mask &= ~flag;
353 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
354 }
355 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
356}
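The new generic waiter helpers let any irq-driven wait share one counting scheme: the first waiter enables the flag in SVGA_REG_IRQMASK, the last one disables it again. A hedged usage sketch bracketing a sleep (the counter name stands in for whichever per-flag count the real caller owns):

	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
			       &dev_priv->cmdbuf_waiters);

	/* ... wait_event()/wait_event_timeout() on the wakeup condition ... */

	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				  &dev_priv->cmdbuf_waiters);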
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 2adc11bc0920..61fb7f3de311 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,45 +31,7 @@
31/* Might need a hrtimer here? */ 31/* Might need a hrtimer here? */
32#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) 32#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
33 33
34 34void vmw_du_cleanup(struct vmw_display_unit *du)
35struct vmw_clip_rect {
36 int x1, x2, y1, y2;
37};
38
39/**
40 * Clip @num_rects number of @rects against @clip storing the
41 * results in @out_rects and the number of passed rects in @out_num.
42 */
43static void vmw_clip_cliprects(struct drm_clip_rect *rects,
44 int num_rects,
45 struct vmw_clip_rect clip,
46 SVGASignedRect *out_rects,
47 int *out_num)
48{
49 int i, k;
50
51 for (i = 0, k = 0; i < num_rects; i++) {
52 int x1 = max_t(int, clip.x1, rects[i].x1);
53 int y1 = max_t(int, clip.y1, rects[i].y1);
54 int x2 = min_t(int, clip.x2, rects[i].x2);
55 int y2 = min_t(int, clip.y2, rects[i].y2);
56
57 if (x1 >= x2)
58 continue;
59 if (y1 >= y2)
60 continue;
61
62 out_rects[k].left = x1;
63 out_rects[k].top = y1;
64 out_rects[k].right = x2;
65 out_rects[k].bottom = y2;
66 k++;
67 }
68
69 *out_num = k;
70}
71
72void vmw_display_unit_cleanup(struct vmw_display_unit *du)
73{ 35{
74 if (du->cursor_surface) 36 if (du->cursor_surface)
75 vmw_surface_unreference(&du->cursor_surface); 37 vmw_surface_unreference(&du->cursor_surface);
@@ -109,12 +71,12 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
109 71
110 memcpy(&cmd[1], image, image_size); 72 memcpy(&cmd[1], image, image_size);
111 73
112 cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR); 74 cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
113 cmd->cursor.id = cpu_to_le32(0); 75 cmd->cursor.id = 0;
114 cmd->cursor.width = cpu_to_le32(width); 76 cmd->cursor.width = width;
115 cmd->cursor.height = cpu_to_le32(height); 77 cmd->cursor.height = height;
116 cmd->cursor.hotspotX = cpu_to_le32(hotspotX); 78 cmd->cursor.hotspotX = hotspotX;
117 cmd->cursor.hotspotY = cpu_to_le32(hotspotY); 79 cmd->cursor.hotspotY = hotspotY;
118 80
119 vmw_fifo_commit(dev_priv, cmd_size); 81 vmw_fifo_commit(dev_priv, cmd_size);
120 82
@@ -161,7 +123,7 @@ err_unreserve:
161void vmw_cursor_update_position(struct vmw_private *dev_priv, 123void vmw_cursor_update_position(struct vmw_private *dev_priv,
162 bool show, int x, int y) 124 bool show, int x, int y)
163{ 125{
164 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 126 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
165 uint32_t count; 127 uint32_t count;
166 128
167 iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON); 129 iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
@@ -367,15 +329,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
367 329
368 srf->snooper.age++; 330 srf->snooper.age++;
369 331
370 /* we can't call this function from this function since execbuf has
371 * reserved fifo space.
372 *
373 * if (srf->snooper.crtc)
374 * vmw_ldu_crtc_cursor_update_image(dev_priv,
375 * srf->snooper.image, 64, 64,
376 * du->hotspot_x, du->hotspot_y);
377 */
378
379 ttm_bo_kunmap(&map); 332 ttm_bo_kunmap(&map);
380err_unreserve: 333err_unreserve:
381 ttm_bo_unreserve(bo); 334 ttm_bo_unreserve(bo);
@@ -412,183 +365,19 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
412 * Surface framebuffer code 365 * Surface framebuffer code
413 */ 366 */
414 367
415#define vmw_framebuffer_to_vfbs(x) \
416 container_of(x, struct vmw_framebuffer_surface, base.base)
417
418struct vmw_framebuffer_surface {
419 struct vmw_framebuffer base;
420 struct vmw_surface *surface;
421 struct vmw_dma_buffer *buffer;
422 struct list_head head;
423 struct drm_master *master;
424};
425
426static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) 368static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
427{ 369{
428 struct vmw_framebuffer_surface *vfbs = 370 struct vmw_framebuffer_surface *vfbs =
429 vmw_framebuffer_to_vfbs(framebuffer); 371 vmw_framebuffer_to_vfbs(framebuffer);
430 struct vmw_master *vmaster = vmw_master(vfbs->master);
431
432 372
433 mutex_lock(&vmaster->fb_surf_mutex);
434 list_del(&vfbs->head);
435 mutex_unlock(&vmaster->fb_surf_mutex);
436
437 drm_master_put(&vfbs->master);
438 drm_framebuffer_cleanup(framebuffer); 373 drm_framebuffer_cleanup(framebuffer);
439 vmw_surface_unreference(&vfbs->surface); 374 vmw_surface_unreference(&vfbs->surface);
440 ttm_base_object_unref(&vfbs->base.user_obj); 375 if (vfbs->base.user_obj)
376 ttm_base_object_unref(&vfbs->base.user_obj);
441 377
442 kfree(vfbs); 378 kfree(vfbs);
443} 379}
444 380
445static int do_surface_dirty_sou(struct vmw_private *dev_priv,
446 struct drm_file *file_priv,
447 struct vmw_framebuffer *framebuffer,
448 unsigned flags, unsigned color,
449 struct drm_clip_rect *clips,
450 unsigned num_clips, int inc,
451 struct vmw_fence_obj **out_fence)
452{
453 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
454 struct drm_clip_rect *clips_ptr;
455 struct drm_clip_rect *tmp;
456 struct drm_crtc *crtc;
457 size_t fifo_size;
458 int i, num_units;
459 int ret = 0; /* silence warning */
460 int left, right, top, bottom;
461
462 struct {
463 SVGA3dCmdHeader header;
464 SVGA3dCmdBlitSurfaceToScreen body;
465 } *cmd;
466 SVGASignedRect *blits;
467
468 num_units = 0;
469 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
470 head) {
471 if (crtc->primary->fb != &framebuffer->base)
472 continue;
473 units[num_units++] = vmw_crtc_to_du(crtc);
474 }
475
476 BUG_ON(!clips || !num_clips);
477
478 tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
479 if (unlikely(tmp == NULL)) {
480 DRM_ERROR("Temporary cliprect memory alloc failed.\n");
481 return -ENOMEM;
482 }
483
484 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
485 cmd = kzalloc(fifo_size, GFP_KERNEL);
486 if (unlikely(cmd == NULL)) {
487 DRM_ERROR("Temporary fifo memory alloc failed.\n");
488 ret = -ENOMEM;
489 goto out_free_tmp;
490 }
491
492 /* setup blits pointer */
493 blits = (SVGASignedRect *)&cmd[1];
494
495 /* initial clip region */
496 left = clips->x1;
497 right = clips->x2;
498 top = clips->y1;
499 bottom = clips->y2;
500
501 /* skip the first clip rect */
502 for (i = 1, clips_ptr = clips + inc;
503 i < num_clips; i++, clips_ptr += inc) {
504 left = min_t(int, left, (int)clips_ptr->x1);
505 right = max_t(int, right, (int)clips_ptr->x2);
506 top = min_t(int, top, (int)clips_ptr->y1);
507 bottom = max_t(int, bottom, (int)clips_ptr->y2);
508 }
509
510 /* only need to do this once */
511 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
512 cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
513
514 cmd->body.srcRect.left = left;
515 cmd->body.srcRect.right = right;
516 cmd->body.srcRect.top = top;
517 cmd->body.srcRect.bottom = bottom;
518
519 clips_ptr = clips;
520 for (i = 0; i < num_clips; i++, clips_ptr += inc) {
521 tmp[i].x1 = clips_ptr->x1 - left;
522 tmp[i].x2 = clips_ptr->x2 - left;
523 tmp[i].y1 = clips_ptr->y1 - top;
524 tmp[i].y2 = clips_ptr->y2 - top;
525 }
526
527 /* do per unit writing, reuse fifo for each */
528 for (i = 0; i < num_units; i++) {
529 struct vmw_display_unit *unit = units[i];
530 struct vmw_clip_rect clip;
531 int num;
532
533 clip.x1 = left - unit->crtc.x;
534 clip.y1 = top - unit->crtc.y;
535 clip.x2 = right - unit->crtc.x;
536 clip.y2 = bottom - unit->crtc.y;
537
538 /* skip any crtcs that misses the clip region */
539 if (clip.x1 >= unit->crtc.mode.hdisplay ||
540 clip.y1 >= unit->crtc.mode.vdisplay ||
541 clip.x2 <= 0 || clip.y2 <= 0)
542 continue;
543
544 /*
545 * In order for the clip rects to be correctly scaled
546 * the src and dest rects needs to be the same size.
547 */
548 cmd->body.destRect.left = clip.x1;
549 cmd->body.destRect.right = clip.x2;
550 cmd->body.destRect.top = clip.y1;
551 cmd->body.destRect.bottom = clip.y2;
552
553 /* create a clip rect of the crtc in dest coords */
554 clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
555 clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
556 clip.x1 = 0 - clip.x1;
557 clip.y1 = 0 - clip.y1;
558
559 /* need to reset sid as it is changed by execbuf */
560 cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
561 cmd->body.destScreenId = unit->unit;
562
563 /* clip and write blits to cmd stream */
564 vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
565
566 /* if no cliprects hit skip this */
567 if (num == 0)
568 continue;
569
570 /* only return the last fence */
571 if (out_fence && *out_fence)
572 vmw_fence_obj_unreference(out_fence);
573
574 /* recalculate package length */
575 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
576 cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
577 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
578 fifo_size, 0, NULL, out_fence);
579
580 if (unlikely(ret != 0))
581 break;
582 }
583
584
585 kfree(cmd);
586out_free_tmp:
587 kfree(tmp);
588
589 return ret;
590}
591
592static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, 381static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
593 struct drm_file *file_priv, 382 struct drm_file *file_priv,
594 unsigned flags, unsigned color, 383 unsigned flags, unsigned color,
@@ -601,11 +390,8 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
601 struct drm_clip_rect norect; 390 struct drm_clip_rect norect;
602 int ret, inc = 1; 391 int ret, inc = 1;
603 392
604 if (unlikely(vfbs->master != file_priv->master)) 393 /* Legacy Display Unit does not support 3D */
605 return -EINVAL; 394 if (dev_priv->active_display_unit == vmw_du_legacy)
606
607 /* Require ScreenObject support for 3D */
608 if (!dev_priv->sou_priv)
609 return -EINVAL; 395 return -EINVAL;
610 396
611 drm_modeset_lock_all(dev_priv->dev); 397 drm_modeset_lock_all(dev_priv->dev);
@@ -627,10 +413,16 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
627 inc = 2; /* skip source rects */ 413 inc = 2; /* skip source rects */
628 } 414 }
629 415
630 ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base, 416 if (dev_priv->active_display_unit == vmw_du_screen_object)
631 flags, color, 417 ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
632 clips, num_clips, inc, NULL); 418 clips, NULL, NULL, 0, 0,
419 num_clips, inc, NULL);
420 else
421 ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
422 clips, NULL, NULL, 0, 0,
423 num_clips, inc, NULL);
633 424
425 vmw_fifo_flush(dev_priv, false);
634 ttm_read_unlock(&dev_priv->reservation_sem); 426 ttm_read_unlock(&dev_priv->reservation_sem);
635 427
636 drm_modeset_unlock_all(dev_priv->dev); 428 drm_modeset_unlock_all(dev_priv->dev);
@@ -638,27 +430,66 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
638 return 0; 430 return 0;
639} 431}
640 432
433/**
434 * vmw_kms_readback - Perform a readback from the screen system to
435 * a dma-buffer backed framebuffer.
436 *
437 * @dev_priv: Pointer to the device private structure.
438 * @file_priv: Pointer to a struct drm_file identifying the caller.
439 * Must be set to NULL if @user_fence_rep is NULL.
440 * @vfb: Pointer to the dma-buffer backed framebuffer.
441 * @user_fence_rep: User-space provided structure for fence information.
442 * Must be set to non-NULL if @file_priv is non-NULL.
443 * @vclips: Array of clip rects.
444 * @num_clips: Number of clip rects in @vclips.
445 *
446 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
447 * interrupted.
448 */
449int vmw_kms_readback(struct vmw_private *dev_priv,
450 struct drm_file *file_priv,
451 struct vmw_framebuffer *vfb,
452 struct drm_vmw_fence_rep __user *user_fence_rep,
453 struct drm_vmw_rect *vclips,
454 uint32_t num_clips)
455{
456 switch (dev_priv->active_display_unit) {
457 case vmw_du_screen_object:
458 return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
459 user_fence_rep, vclips, num_clips);
460 case vmw_du_screen_target:
461 return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
462 user_fence_rep, NULL, vclips, num_clips,
463 1, false, true);
464 default:
465 WARN_ONCE(true,
466 "Readback called with invalid display system.\n");
467}
468
469 return -ENOSYS;
470}
471
472
641static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { 473static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
642 .destroy = vmw_framebuffer_surface_destroy, 474 .destroy = vmw_framebuffer_surface_destroy,
643 .dirty = vmw_framebuffer_surface_dirty, 475 .dirty = vmw_framebuffer_surface_dirty,
644}; 476};
645 477
646static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, 478static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
647 struct drm_file *file_priv,
648 struct vmw_surface *surface, 479 struct vmw_surface *surface,
649 struct vmw_framebuffer **out, 480 struct vmw_framebuffer **out,
650 const struct drm_mode_fb_cmd 481 const struct drm_mode_fb_cmd
651 *mode_cmd) 482 *mode_cmd,
483 bool is_dmabuf_proxy)
652 484
653{ 485{
654 struct drm_device *dev = dev_priv->dev; 486 struct drm_device *dev = dev_priv->dev;
655 struct vmw_framebuffer_surface *vfbs; 487 struct vmw_framebuffer_surface *vfbs;
656 enum SVGA3dSurfaceFormat format; 488 enum SVGA3dSurfaceFormat format;
657 struct vmw_master *vmaster = vmw_master(file_priv->master);
658 int ret; 489 int ret;
659 490
660 /* 3D is only supported on HWv8 hosts which supports screen objects */ 491 /* 3D is only supported on HWv8 and newer hosts */
661 if (!dev_priv->sou_priv) 492 if (dev_priv->active_display_unit == vmw_du_legacy)
662 return -ENOSYS; 493 return -ENOSYS;
663 494
664 /* 495 /*
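A hedged caller sketch for the consolidated readback entry point above; per the kerneldoc, @file_priv and @user_fence_rep must be NULL or non-NULL together, and the clip rects arrive as drm_vmw_rect from the ioctl layer (the error handling shown is illustrative only):

	ret = vmw_kms_readback(dev_priv, file_priv, vfb, user_fence_rep,
			       vclips, num_clips);
	if (unlikely(ret != 0))
		DRM_ERROR("Readback failed, ret = %d.\n", ret);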
@@ -692,15 +523,16 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
692 case 15: 523 case 15:
693 format = SVGA3D_A1R5G5B5; 524 format = SVGA3D_A1R5G5B5;
694 break; 525 break;
695 case 8:
696 format = SVGA3D_LUMINANCE8;
697 break;
698 default: 526 default:
699 DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth); 527 DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
700 return -EINVAL; 528 return -EINVAL;
701 } 529 }
702 530
703 if (unlikely(format != surface->format)) { 531 /*
532 * For DX, surface format validation is done when surface->scanout
533 * is set.
534 */
535 if (!dev_priv->has_dx && format != surface->format) {
704 DRM_ERROR("Invalid surface format for requested mode.\n"); 536 DRM_ERROR("Invalid surface format for requested mode.\n");
705 return -EINVAL; 537 return -EINVAL;
706 } 538 }
@@ -711,38 +543,27 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
711 goto out_err1; 543 goto out_err1;
712 } 544 }
713 545
714 if (!vmw_surface_reference(surface)) {
715 DRM_ERROR("failed to reference surface %p\n", surface);
716 ret = -EINVAL;
717 goto out_err2;
718 }
719
720 /* XXX get the first 3 from the surface info */ 546 /* XXX get the first 3 from the surface info */
721 vfbs->base.base.bits_per_pixel = mode_cmd->bpp; 547 vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
722 vfbs->base.base.pitches[0] = mode_cmd->pitch; 548 vfbs->base.base.pitches[0] = mode_cmd->pitch;
723 vfbs->base.base.depth = mode_cmd->depth; 549 vfbs->base.base.depth = mode_cmd->depth;
724 vfbs->base.base.width = mode_cmd->width; 550 vfbs->base.base.width = mode_cmd->width;
725 vfbs->base.base.height = mode_cmd->height; 551 vfbs->base.base.height = mode_cmd->height;
726 vfbs->surface = surface; 552 vfbs->surface = vmw_surface_reference(surface);
727 vfbs->base.user_handle = mode_cmd->handle; 553 vfbs->base.user_handle = mode_cmd->handle;
728 vfbs->master = drm_master_get(file_priv->master); 554 vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
729
730 mutex_lock(&vmaster->fb_surf_mutex);
731 list_add_tail(&vfbs->head, &vmaster->fb_surf);
732 mutex_unlock(&vmaster->fb_surf_mutex);
733 555
734 *out = &vfbs->base; 556 *out = &vfbs->base;
735 557
736 ret = drm_framebuffer_init(dev, &vfbs->base.base, 558 ret = drm_framebuffer_init(dev, &vfbs->base.base,
737 &vmw_framebuffer_surface_funcs); 559 &vmw_framebuffer_surface_funcs);
738 if (ret) 560 if (ret)
739 goto out_err3; 561 goto out_err2;
740 562
741 return 0; 563 return 0;
742 564
743out_err3:
744 vmw_surface_unreference(&surface);
745out_err2: 565out_err2:
566 vmw_surface_unreference(&surface);
746 kfree(vfbs); 567 kfree(vfbs);
747out_err1: 568out_err1:
748 return ret; 569 return ret;
@@ -752,14 +573,6 @@ out_err1:
752 * Dmabuf framebuffer code 573 * Dmabuf framebuffer code
753 */ 574 */
754 575
755#define vmw_framebuffer_to_vfbd(x) \
756 container_of(x, struct vmw_framebuffer_dmabuf, base.base)
757
758struct vmw_framebuffer_dmabuf {
759 struct vmw_framebuffer base;
760 struct vmw_dma_buffer *buffer;
761};
762
763static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) 576static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
764{ 577{
765 struct vmw_framebuffer_dmabuf *vfbd = 578 struct vmw_framebuffer_dmabuf *vfbd =
@@ -767,185 +580,12 @@ static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
767 580
768 drm_framebuffer_cleanup(framebuffer); 581 drm_framebuffer_cleanup(framebuffer);
769 vmw_dmabuf_unreference(&vfbd->buffer); 582 vmw_dmabuf_unreference(&vfbd->buffer);
770 ttm_base_object_unref(&vfbd->base.user_obj); 583 if (vfbd->base.user_obj)
584 ttm_base_object_unref(&vfbd->base.user_obj);
771 585
772 kfree(vfbd); 586 kfree(vfbd);
773} 587}
774 588
775static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
776 struct vmw_framebuffer *framebuffer,
777 unsigned flags, unsigned color,
778 struct drm_clip_rect *clips,
779 unsigned num_clips, int increment)
780{
781 size_t fifo_size;
782 int i;
783
784 struct {
785 uint32_t header;
786 SVGAFifoCmdUpdate body;
787 } *cmd;
788
789 fifo_size = sizeof(*cmd) * num_clips;
790 cmd = vmw_fifo_reserve(dev_priv, fifo_size);
791 if (unlikely(cmd == NULL)) {
792 DRM_ERROR("Fifo reserve failed.\n");
793 return -ENOMEM;
794 }
795
796 memset(cmd, 0, fifo_size);
797 for (i = 0; i < num_clips; i++, clips += increment) {
798 cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
799 cmd[i].body.x = cpu_to_le32(clips->x1);
800 cmd[i].body.y = cpu_to_le32(clips->y1);
801 cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
802 cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
803 }
804
805 vmw_fifo_commit(dev_priv, fifo_size);
806 return 0;
807}
808
809static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
810 struct vmw_private *dev_priv,
811 struct vmw_framebuffer *framebuffer)
812{
813 int depth = framebuffer->base.depth;
814 size_t fifo_size;
815 int ret;
816
817 struct {
818 uint32_t header;
819 SVGAFifoCmdDefineGMRFB body;
820 } *cmd;
821
822 /* Emulate RGBA support, contrary to svga_reg.h this is not
823 * supported by hosts. This is only a problem if we are reading
824 * this value later and expecting what we uploaded back.
825 */
826 if (depth == 32)
827 depth = 24;
828
829 fifo_size = sizeof(*cmd);
830 cmd = kmalloc(fifo_size, GFP_KERNEL);
831 if (unlikely(cmd == NULL)) {
832 DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
833 return -ENOMEM;
834 }
835
836 memset(cmd, 0, fifo_size);
837 cmd->header = SVGA_CMD_DEFINE_GMRFB;
838 cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
839 cmd->body.format.colorDepth = depth;
840 cmd->body.format.reserved = 0;
841 cmd->body.bytesPerLine = framebuffer->base.pitches[0];
842 cmd->body.ptr.gmrId = framebuffer->user_handle;
843 cmd->body.ptr.offset = 0;
844
845 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
846 fifo_size, 0, NULL, NULL);
847
848 kfree(cmd);
849
850 return ret;
851}
852
853static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
854 struct vmw_private *dev_priv,
855 struct vmw_framebuffer *framebuffer,
856 unsigned flags, unsigned color,
857 struct drm_clip_rect *clips,
858 unsigned num_clips, int increment,
859 struct vmw_fence_obj **out_fence)
860{
861 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
862 struct drm_clip_rect *clips_ptr;
863 int i, k, num_units, ret;
864 struct drm_crtc *crtc;
865 size_t fifo_size;
866
867 struct {
868 uint32_t header;
869 SVGAFifoCmdBlitGMRFBToScreen body;
870 } *blits;
871
872 ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
873 if (unlikely(ret != 0))
874 return ret; /* define_gmrfb prints warnings */
875
876 fifo_size = sizeof(*blits) * num_clips;
877 blits = kmalloc(fifo_size, GFP_KERNEL);
878 if (unlikely(blits == NULL)) {
879 DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
880 return -ENOMEM;
881 }
882
883 num_units = 0;
884 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
885 if (crtc->primary->fb != &framebuffer->base)
886 continue;
887 units[num_units++] = vmw_crtc_to_du(crtc);
888 }
889
890 for (k = 0; k < num_units; k++) {
891 struct vmw_display_unit *unit = units[k];
892 int hit_num = 0;
893
894 clips_ptr = clips;
895 for (i = 0; i < num_clips; i++, clips_ptr += increment) {
896 int clip_x1 = clips_ptr->x1 - unit->crtc.x;
897 int clip_y1 = clips_ptr->y1 - unit->crtc.y;
898 int clip_x2 = clips_ptr->x2 - unit->crtc.x;
899 int clip_y2 = clips_ptr->y2 - unit->crtc.y;
900 int move_x, move_y;
901
902 /* skip any crtcs that misses the clip region */
903 if (clip_x1 >= unit->crtc.mode.hdisplay ||
904 clip_y1 >= unit->crtc.mode.vdisplay ||
905 clip_x2 <= 0 || clip_y2 <= 0)
906 continue;
907
908 /* clip size to crtc size */
909 clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
910 clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
911
912 /* translate both src and dest to bring clip into screen */
913 move_x = min_t(int, clip_x1, 0);
914 move_y = min_t(int, clip_y1, 0);
915
916 /* actual translate done here */
917 blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
918 blits[hit_num].body.destScreenId = unit->unit;
919 blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
920 blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
921 blits[hit_num].body.destRect.left = clip_x1 - move_x;
922 blits[hit_num].body.destRect.top = clip_y1 - move_y;
923 blits[hit_num].body.destRect.right = clip_x2;
924 blits[hit_num].body.destRect.bottom = clip_y2;
925 hit_num++;
926 }
927
928 /* no clips hit the crtc */
929 if (hit_num == 0)
930 continue;
931
932 /* only return the last fence */
933 if (out_fence && *out_fence)
934 vmw_fence_obj_unreference(out_fence);
935
936 fifo_size = sizeof(*blits) * hit_num;
937 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
938 fifo_size, 0, NULL, out_fence);
939
940 if (unlikely(ret != 0))
941 break;
942 }
943
944 kfree(blits);
945
946 return ret;
947}
948
949static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, 589static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
950 struct drm_file *file_priv, 590 struct drm_file *file_priv,
951 unsigned flags, unsigned color, 591 unsigned flags, unsigned color,
@@ -977,16 +617,29 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
977 increment = 2; 617 increment = 2;
978 } 618 }
979 619
980 if (dev_priv->ldu_priv) { 620 switch (dev_priv->active_display_unit) {
981 ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base, 621 case vmw_du_screen_target:
982 flags, color, 622 ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
983 clips, num_clips, increment); 623 clips, NULL, num_clips, increment,
984 } else { 624 true, true);
985 ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base, 625 break;
986 flags, color, 626 case vmw_du_screen_object:
987 clips, num_clips, increment, NULL); 627 ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
628 clips, num_clips, increment,
629 true,
630 NULL);
631 break;
632 case vmw_du_legacy:
633 ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
634 clips, num_clips, increment);
635 break;
636 default:
637 ret = -EINVAL;
638 WARN_ONCE(true, "Dirty called with invalid display system.\n");
639 break;
988 } 640 }
989 641
642 vmw_fifo_flush(dev_priv, false);
990 ttm_read_unlock(&dev_priv->reservation_sem); 643 ttm_read_unlock(&dev_priv->reservation_sem);
991 644
992 drm_modeset_unlock_all(dev_priv->dev); 645 drm_modeset_unlock_all(dev_priv->dev);
@@ -1002,41 +655,133 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
1002/** 655/**
1003 * Pin the dmabuffer to the start of vram. 656 * Pin the dmabuffer to the start of vram.
1004 */ 657 */
1005static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) 658static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
1006{ 659{
1007 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); 660 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1008 struct vmw_framebuffer_dmabuf *vfbd = 661 struct vmw_dma_buffer *buf;
1009 vmw_framebuffer_to_vfbd(&vfb->base);
1010 int ret; 662 int ret;
1011 663
1012 /* This code should not be used with screen objects */ 664 buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1013 BUG_ON(dev_priv->sou_priv); 665 vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1014
1015 vmw_overlay_pause_all(dev_priv);
1016 666
1017 ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false); 667 if (!buf)
1018 668 return 0;
1019 vmw_overlay_resume_all(dev_priv);
1020 669
1021 WARN_ON(ret != 0); 670 switch (dev_priv->active_display_unit) {
671 case vmw_du_legacy:
672 vmw_overlay_pause_all(dev_priv);
673 ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
674 vmw_overlay_resume_all(dev_priv);
675 break;
676 case vmw_du_screen_object:
677 case vmw_du_screen_target:
678 if (vfb->dmabuf)
679 return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
680 false);
681
682 return vmw_dmabuf_pin_in_placement(dev_priv, buf,
683 &vmw_mob_placement, false);
684 default:
685 return -EINVAL;
686 }
1022 687
1023 return 0; 688 return ret;
1024} 689}
1025 690
1026static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb) 691static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
1027{ 692{
1028 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); 693 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1029 struct vmw_framebuffer_dmabuf *vfbd = 694 struct vmw_dma_buffer *buf;
1030 vmw_framebuffer_to_vfbd(&vfb->base);
1031 695
1032 if (!vfbd->buffer) { 696 buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1033 WARN_ON(!vfbd->buffer); 697 vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
698
699 if (WARN_ON(!buf))
1034 return 0; 700 return 0;
701
702 return vmw_dmabuf_unpin(dev_priv, buf, false);
703}
704
705/**
706 * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
707 *
708 * @dev: DRM device
709 * @mode_cmd: parameters for the new surface
710 * @dmabuf_mob: MOB backing the DMA buf
711 * @srf_out: newly created surface
712 *
713 * When the content FB is a DMA buf, we create a surface as a proxy to the
714 * same buffer. This way we can do a surface copy rather than a surface DMA.
 715 * This is a more efficient approach.
716 *
717 * RETURNS:
718 * 0 on success, error code otherwise
719 */
720static int vmw_create_dmabuf_proxy(struct drm_device *dev,
721 const struct drm_mode_fb_cmd *mode_cmd,
722 struct vmw_dma_buffer *dmabuf_mob,
723 struct vmw_surface **srf_out)
724{
725 uint32_t format;
726 struct drm_vmw_size content_base_size;
727 struct vmw_resource *res;
728 int ret;
729
730 switch (mode_cmd->depth) {
731 case 32:
732 case 24:
733 format = SVGA3D_X8R8G8B8;
734 break;
735
736 case 16:
737 case 15:
738 format = SVGA3D_R5G6B5;
739 break;
740
741 case 8:
742 format = SVGA3D_P8;
743 break;
744
745 default:
746 DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
747 return -EINVAL;
1035 } 748 }
1036 749
1037 return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false); 750 content_base_size.width = mode_cmd->width;
751 content_base_size.height = mode_cmd->height;
752 content_base_size.depth = 1;
753
754 ret = vmw_surface_gb_priv_define(dev,
755 0, /* kernel visible only */
756 0, /* flags */
757 format,
758 true, /* can be a scanout buffer */
759 1, /* num of mip levels */
760 0,
761 0,
762 content_base_size,
763 srf_out);
764 if (ret) {
765 DRM_ERROR("Failed to allocate proxy content buffer\n");
766 return ret;
767 }
768
769 res = &(*srf_out)->res;
770
771 /* Reserve and switch the backing mob. */
772 mutex_lock(&res->dev_priv->cmdbuf_mutex);
773 (void) vmw_resource_reserve(res, false, true);
774 vmw_dmabuf_unreference(&res->backup);
775 res->backup = vmw_dmabuf_reference(dmabuf_mob);
776 res->backup_offset = 0;
777 vmw_resource_unreserve(res, false, NULL, 0);
778 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
779
780 return 0;
1038} 781}
1039 782
783
784
1040static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, 785static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
1041 struct vmw_dma_buffer *dmabuf, 786 struct vmw_dma_buffer *dmabuf,
1042 struct vmw_framebuffer **out, 787 struct vmw_framebuffer **out,
@@ -1057,7 +802,7 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
1057 } 802 }
1058 803
1059 /* Limited framebuffer color depth support for screen objects */ 804 /* Limited framebuffer color depth support for screen objects */
1060 if (dev_priv->sou_priv) { 805 if (dev_priv->active_display_unit == vmw_du_screen_object) {
1061 switch (mode_cmd->depth) { 806 switch (mode_cmd->depth) {
1062 case 32: 807 case 32:
1063 case 24: 808 case 24:
@@ -1089,41 +834,96 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
1089 goto out_err1; 834 goto out_err1;
1090 } 835 }
1091 836
1092 if (!vmw_dmabuf_reference(dmabuf)) {
1093 DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
1094 ret = -EINVAL;
1095 goto out_err2;
1096 }
1097
1098 vfbd->base.base.bits_per_pixel = mode_cmd->bpp; 837 vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
1099 vfbd->base.base.pitches[0] = mode_cmd->pitch; 838 vfbd->base.base.pitches[0] = mode_cmd->pitch;
1100 vfbd->base.base.depth = mode_cmd->depth; 839 vfbd->base.base.depth = mode_cmd->depth;
1101 vfbd->base.base.width = mode_cmd->width; 840 vfbd->base.base.width = mode_cmd->width;
1102 vfbd->base.base.height = mode_cmd->height; 841 vfbd->base.base.height = mode_cmd->height;
1103 if (!dev_priv->sou_priv) {
1104 vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
1105 vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
1106 }
1107 vfbd->base.dmabuf = true; 842 vfbd->base.dmabuf = true;
1108 vfbd->buffer = dmabuf; 843 vfbd->buffer = vmw_dmabuf_reference(dmabuf);
1109 vfbd->base.user_handle = mode_cmd->handle; 844 vfbd->base.user_handle = mode_cmd->handle;
1110 *out = &vfbd->base; 845 *out = &vfbd->base;
1111 846
1112 ret = drm_framebuffer_init(dev, &vfbd->base.base, 847 ret = drm_framebuffer_init(dev, &vfbd->base.base,
1113 &vmw_framebuffer_dmabuf_funcs); 848 &vmw_framebuffer_dmabuf_funcs);
1114 if (ret) 849 if (ret)
1115 goto out_err3; 850 goto out_err2;
1116 851
1117 return 0; 852 return 0;
1118 853
1119out_err3:
1120 vmw_dmabuf_unreference(&dmabuf);
1121out_err2: 854out_err2:
855 vmw_dmabuf_unreference(&dmabuf);
1122 kfree(vfbd); 856 kfree(vfbd);
1123out_err1: 857out_err1:
1124 return ret; 858 return ret;
1125} 859}
1126 860
861/**
862 * vmw_kms_new_framebuffer - Create a new framebuffer.
863 *
864 * @dev_priv: Pointer to device private struct.
865 * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
866 * Either @dmabuf or @surface must be NULL.
867 * @surface: Pointer to a surface to wrap the kms framebuffer around.
868 * Either @dmabuf or @surface must be NULL.
869 * @only_2d: No presents will occur to this dma buffer based framebuffer. This
 870 * helps the code to do some important optimizations.
871 * @mode_cmd: Frame-buffer metadata.
872 */
873struct vmw_framebuffer *
874vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
875 struct vmw_dma_buffer *dmabuf,
876 struct vmw_surface *surface,
877 bool only_2d,
878 const struct drm_mode_fb_cmd *mode_cmd)
879{
880 struct vmw_framebuffer *vfb = NULL;
881 bool is_dmabuf_proxy = false;
882 int ret;
883
884 /*
 885	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
886 * therefore, wrap the DMA buf in a surface so we can use the
887 * SurfaceCopy command.
888 */
889 if (dmabuf && only_2d &&
890 dev_priv->active_display_unit == vmw_du_screen_target) {
891 ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
892 dmabuf, &surface);
893 if (ret)
894 return ERR_PTR(ret);
895
896 is_dmabuf_proxy = true;
897 }
898
 899	/* Create the new framebuffer depending on what we have */
900 if (surface) {
901 ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
902 mode_cmd,
903 is_dmabuf_proxy);
904
905 /*
906 * vmw_create_dmabuf_proxy() adds a reference that is no longer
907 * needed
908 */
909 if (is_dmabuf_proxy)
910 vmw_surface_unreference(&surface);
911 } else if (dmabuf) {
912 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
913 mode_cmd);
914 } else {
915 BUG();
916 }
917
918 if (ret)
919 return ERR_PTR(ret);
920
921 vfb->pin = vmw_framebuffer_pin;
922 vfb->unpin = vmw_framebuffer_unpin;
923
924 return vfb;
925}
926
1127/* 927/*
1128 * Generic Kernel modesetting functions 928 * Generic Kernel modesetting functions
1129 */ 929 */
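A hedged caller sketch for the new single constructor: exactly one of @dmabuf/@surface is non-NULL, failures come back as ERR_PTR() rather than NULL, and the function takes its own references, so lookup references stay with the caller (compare the fb_create hunk below; bo, surface and mode_cmd are assumed context):

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      &mode_cmd);
	if (IS_ERR(vfb))
		return PTR_ERR(vfb);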
@@ -1157,7 +957,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1157 if (!vmw_kms_validate_mode_vram(dev_priv, 957 if (!vmw_kms_validate_mode_vram(dev_priv,
1158 mode_cmd.pitch, 958 mode_cmd.pitch,
1159 mode_cmd.height)) { 959 mode_cmd.height)) {
1160 DRM_ERROR("VRAM size is too small for requested mode.\n"); 960 DRM_ERROR("Requested mode exceed bounding box limit.\n");
1161 return ERR_PTR(-ENOMEM); 961 return ERR_PTR(-ENOMEM);
1162 } 962 }
1163 963
@@ -1187,15 +987,13 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1187 if (ret) 987 if (ret)
1188 goto err_out; 988 goto err_out;
1189 989
1190 /* Create the new framebuffer depending one what we got back */ 990 vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1191 if (bo) 991 !(dev_priv->capabilities & SVGA_CAP_3D),
1192 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb, 992 &mode_cmd);
1193 &mode_cmd); 993 if (IS_ERR(vfb)) {
1194 else if (surface) 994 ret = PTR_ERR(vfb);
1195 ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, 995 goto err_out;
1196 surface, &vfb, &mode_cmd); 996 }
1197 else
1198 BUG();
1199 997
1200err_out: 998err_out:
1201 /* vmw_user_lookup_handle takes one ref so does new_fb */ 999 /* vmw_user_lookup_handle takes one ref so does new_fb */
@@ -1218,6 +1016,21 @@ static const struct drm_mode_config_funcs vmw_kms_funcs = {
1218 .fb_create = vmw_kms_fb_create, 1016 .fb_create = vmw_kms_fb_create,
1219}; 1017};
1220 1018
1019static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1020 struct drm_file *file_priv,
1021 struct vmw_framebuffer *vfb,
1022 struct vmw_surface *surface,
1023 uint32_t sid,
1024 int32_t destX, int32_t destY,
1025 struct drm_vmw_rect *clips,
1026 uint32_t num_clips)
1027{
1028 return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1029 &surface->res, destX, destY,
1030 num_clips, 1, NULL);
1031}
1032
1033
1221int vmw_kms_present(struct vmw_private *dev_priv, 1034int vmw_kms_present(struct vmw_private *dev_priv,
1222 struct drm_file *file_priv, 1035 struct drm_file *file_priv,
1223 struct vmw_framebuffer *vfb, 1036 struct vmw_framebuffer *vfb,
@@ -1227,238 +1040,31 @@ int vmw_kms_present(struct vmw_private *dev_priv,
1227 struct drm_vmw_rect *clips, 1040 struct drm_vmw_rect *clips,
1228 uint32_t num_clips) 1041 uint32_t num_clips)
1229{ 1042{
1230 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; 1043 int ret;
1231 struct drm_clip_rect *tmp;
1232 struct drm_crtc *crtc;
1233 size_t fifo_size;
1234 int i, k, num_units;
1235 int ret = 0; /* silence warning */
1236 int left, right, top, bottom;
1237
1238 struct {
1239 SVGA3dCmdHeader header;
1240 SVGA3dCmdBlitSurfaceToScreen body;
1241 } *cmd;
1242 SVGASignedRect *blits;
1243
1244 num_units = 0;
1245 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
1246 if (crtc->primary->fb != &vfb->base)
1247 continue;
1248 units[num_units++] = vmw_crtc_to_du(crtc);
1249 }
1250
1251 BUG_ON(surface == NULL);
1252 BUG_ON(!clips || !num_clips);
1253
1254 tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
1255 if (unlikely(tmp == NULL)) {
1256 DRM_ERROR("Temporary cliprect memory alloc failed.\n");
1257 return -ENOMEM;
1258 }
1259
1260 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
1261 cmd = kmalloc(fifo_size, GFP_KERNEL);
1262 if (unlikely(cmd == NULL)) {
1263 DRM_ERROR("Failed to allocate temporary fifo memory.\n");
1264 ret = -ENOMEM;
1265 goto out_free_tmp;
1266 }
1267
1268 left = clips->x;
1269 right = clips->x + clips->w;
1270 top = clips->y;
1271 bottom = clips->y + clips->h;
1272
1273 for (i = 1; i < num_clips; i++) {
1274 left = min_t(int, left, (int)clips[i].x);
1275 right = max_t(int, right, (int)clips[i].x + clips[i].w);
1276 top = min_t(int, top, (int)clips[i].y);
1277 bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
1278 }
1279
1280 /* only need to do this once */
1281 memset(cmd, 0, fifo_size);
1282 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
1283
1284 blits = (SVGASignedRect *)&cmd[1];
1285
1286 cmd->body.srcRect.left = left;
1287 cmd->body.srcRect.right = right;
1288 cmd->body.srcRect.top = top;
1289 cmd->body.srcRect.bottom = bottom;
1290
1291 for (i = 0; i < num_clips; i++) {
1292 tmp[i].x1 = clips[i].x - left;
1293 tmp[i].x2 = clips[i].x + clips[i].w - left;
1294 tmp[i].y1 = clips[i].y - top;
1295 tmp[i].y2 = clips[i].y + clips[i].h - top;
1296 }
1297
1298 for (k = 0; k < num_units; k++) {
1299 struct vmw_display_unit *unit = units[k];
1300 struct vmw_clip_rect clip;
1301 int num;
1302
1303 clip.x1 = left + destX - unit->crtc.x;
1304 clip.y1 = top + destY - unit->crtc.y;
1305 clip.x2 = right + destX - unit->crtc.x;
1306 clip.y2 = bottom + destY - unit->crtc.y;
1307
1308 /* skip any crtcs that misses the clip region */
1309 if (clip.x1 >= unit->crtc.mode.hdisplay ||
1310 clip.y1 >= unit->crtc.mode.vdisplay ||
1311 clip.x2 <= 0 || clip.y2 <= 0)
1312 continue;
1313
1314 /*
1315 * In order for the clip rects to be correctly scaled
1316 * the src and dest rects needs to be the same size.
1317 */
1318 cmd->body.destRect.left = clip.x1;
1319 cmd->body.destRect.right = clip.x2;
1320 cmd->body.destRect.top = clip.y1;
1321 cmd->body.destRect.bottom = clip.y2;
1322
1323 /* create a clip rect of the crtc in dest coords */
1324 clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
1325 clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
1326 clip.x1 = 0 - clip.x1;
1327 clip.y1 = 0 - clip.y1;
1328
1329 /* need to reset sid as it is changed by execbuf */
1330 cmd->body.srcImage.sid = sid;
1331 cmd->body.destScreenId = unit->unit;
1332
1333 /* clip and write blits to cmd stream */
1334 vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
1335
1336 /* if no cliprects hit skip this */
1337 if (num == 0)
1338 continue;
1339
1340 /* recalculate package length */
1341 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
1342 cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
1343 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
1344 fifo_size, 0, NULL, NULL);
1345
1346 if (unlikely(ret != 0))
1347 break;
1348 }
1349
1350 kfree(cmd);
1351out_free_tmp:
1352 kfree(tmp);
1353
1354 return ret;
1355}
1356
1357int vmw_kms_readback(struct vmw_private *dev_priv,
1358 struct drm_file *file_priv,
1359 struct vmw_framebuffer *vfb,
1360 struct drm_vmw_fence_rep __user *user_fence_rep,
1361 struct drm_vmw_rect *clips,
1362 uint32_t num_clips)
1363{
1364 struct vmw_framebuffer_dmabuf *vfbd =
1365 vmw_framebuffer_to_vfbd(&vfb->base);
1366 struct vmw_dma_buffer *dmabuf = vfbd->buffer;
1367 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
1368 struct drm_crtc *crtc;
1369 size_t fifo_size;
1370 int i, k, ret, num_units, blits_pos;
1371
1372 struct {
1373 uint32_t header;
1374 SVGAFifoCmdDefineGMRFB body;
1375 } *cmd;
1376 struct {
1377 uint32_t header;
1378 SVGAFifoCmdBlitScreenToGMRFB body;
1379 } *blits;
1380
1381 num_units = 0;
1382 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
1383 if (crtc->primary->fb != &vfb->base)
1384 continue;
1385 units[num_units++] = vmw_crtc_to_du(crtc);
1386 }
1387
1388 BUG_ON(dmabuf == NULL);
1389 BUG_ON(!clips || !num_clips);
1390
1391 /* take a safe guess at fifo size */
1392 fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
1393 cmd = kmalloc(fifo_size, GFP_KERNEL);
1394 if (unlikely(cmd == NULL)) {
1395 DRM_ERROR("Failed to allocate temporary fifo memory.\n");
1396 return -ENOMEM;
1397 }
1398
1399 memset(cmd, 0, fifo_size);
1400 cmd->header = SVGA_CMD_DEFINE_GMRFB;
1401 cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
1402 cmd->body.format.colorDepth = vfb->base.depth;
1403 cmd->body.format.reserved = 0;
1404 cmd->body.bytesPerLine = vfb->base.pitches[0];
1405 cmd->body.ptr.gmrId = vfb->user_handle;
1406 cmd->body.ptr.offset = 0;
1407
1408 blits = (void *)&cmd[1];
1409 blits_pos = 0;
1410 for (i = 0; i < num_units; i++) {
1411 struct drm_vmw_rect *c = clips;
1412 for (k = 0; k < num_clips; k++, c++) {
1413 /* transform clip coords to crtc origin based coords */
1414 int clip_x1 = c->x - units[i]->crtc.x;
1415 int clip_x2 = c->x - units[i]->crtc.x + c->w;
1416 int clip_y1 = c->y - units[i]->crtc.y;
1417 int clip_y2 = c->y - units[i]->crtc.y + c->h;
1418 int dest_x = c->x;
1419 int dest_y = c->y;
1420
1421 /* compensate for clipping, we negate
1422 * a negative number and add that.
1423 */
1424 if (clip_x1 < 0)
1425 dest_x += -clip_x1;
1426 if (clip_y1 < 0)
1427 dest_y += -clip_y1;
1428
1429 /* clip */
1430 clip_x1 = max(clip_x1, 0);
1431 clip_y1 = max(clip_y1, 0);
1432 clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
1433 clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);
1434
1435 /* and cull any rects that misses the crtc */
1436 if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
1437 clip_y1 >= units[i]->crtc.mode.vdisplay ||
1438 clip_x2 <= 0 || clip_y2 <= 0)
1439 continue;
1440
1441 blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
1442 blits[blits_pos].body.srcScreenId = units[i]->unit;
1443 blits[blits_pos].body.destOrigin.x = dest_x;
1444 blits[blits_pos].body.destOrigin.y = dest_y;
1445 1044
1446 blits[blits_pos].body.srcRect.left = clip_x1; 1045 switch (dev_priv->active_display_unit) {
1447 blits[blits_pos].body.srcRect.top = clip_y1; 1046 case vmw_du_screen_target:
1448 blits[blits_pos].body.srcRect.right = clip_x2; 1047 ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
1449 blits[blits_pos].body.srcRect.bottom = clip_y2; 1048 &surface->res, destX, destY,
1450 blits_pos++; 1049 num_clips, 1, NULL);
1451 } 1050 break;
1051 case vmw_du_screen_object:
1052 ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
1053 sid, destX, destY, clips,
1054 num_clips);
1055 break;
1056 default:
1057 WARN_ONCE(true,
1058 "Present called with invalid display system.\n");
1059 ret = -ENOSYS;
1060 break;
1452 } 1061 }
1453 /* reset size here and use calculated exact size from loops */ 1062 if (ret)
1454 fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos; 1063 return ret;
1455
1456 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
1457 0, user_fence_rep, NULL);
1458 1064
1459 kfree(cmd); 1065 vmw_fifo_flush(dev_priv, false);
1460 1066
1461 return ret; 1067 return 0;
1462} 1068}
1463 1069
1464int vmw_kms_init(struct vmw_private *dev_priv) 1070int vmw_kms_init(struct vmw_private *dev_priv)
@@ -1470,30 +1076,37 @@ int vmw_kms_init(struct vmw_private *dev_priv)
1470 dev->mode_config.funcs = &vmw_kms_funcs; 1076 dev->mode_config.funcs = &vmw_kms_funcs;
1471 dev->mode_config.min_width = 1; 1077 dev->mode_config.min_width = 1;
1472 dev->mode_config.min_height = 1; 1078 dev->mode_config.min_height = 1;
1473 /* assumed largest fb size */ 1079 dev->mode_config.max_width = dev_priv->texture_max_width;
1474 dev->mode_config.max_width = 8192; 1080 dev->mode_config.max_height = dev_priv->texture_max_height;
1475 dev->mode_config.max_height = 8192;
1476 1081
1477 ret = vmw_kms_init_screen_object_display(dev_priv); 1082 ret = vmw_kms_stdu_init_display(dev_priv);
1478 if (ret) /* Fallback */ 1083 if (ret) {
1479 (void)vmw_kms_init_legacy_display_system(dev_priv); 1084 ret = vmw_kms_sou_init_display(dev_priv);
1085 if (ret) /* Fallback */
1086 ret = vmw_kms_ldu_init_display(dev_priv);
1087 }
1480 1088
1481 return 0; 1089 return ret;
1482} 1090}
1483 1091
1484int vmw_kms_close(struct vmw_private *dev_priv) 1092int vmw_kms_close(struct vmw_private *dev_priv)
1485{ 1093{
1094 int ret;
1095
1486 /* 1096 /*
1487 * Docs says we should take the lock before calling this function 1097 * Docs says we should take the lock before calling this function
1488 * but since it destroys encoders and our destructor calls 1098 * but since it destroys encoders and our destructor calls
1489 * drm_encoder_cleanup which takes the lock we deadlock. 1099 * drm_encoder_cleanup which takes the lock we deadlock.
1490 */ 1100 */
1491 drm_mode_config_cleanup(dev_priv->dev); 1101 drm_mode_config_cleanup(dev_priv->dev);
1492 if (dev_priv->sou_priv) 1102 if (dev_priv->active_display_unit == vmw_du_screen_object)
1493 vmw_kms_close_screen_object_display(dev_priv); 1103 ret = vmw_kms_sou_close_display(dev_priv);
1104 else if (dev_priv->active_display_unit == vmw_du_screen_target)
1105 ret = vmw_kms_stdu_close_display(dev_priv);
1494 else 1106 else
1495 vmw_kms_close_legacy_display_system(dev_priv); 1107 ret = vmw_kms_ldu_close_display(dev_priv);
1496 return 0; 1108
1109 return ret;
1497} 1110}
1498 1111
1499int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, 1112int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
@@ -1569,7 +1182,7 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
1569 vmw_read(vmw_priv, SVGA_REG_PITCHLOCK); 1182 vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
1570 else if (vmw_fifo_have_pitchlock(vmw_priv)) 1183 else if (vmw_fifo_have_pitchlock(vmw_priv))
1571 vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt + 1184 vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
1572 SVGA_FIFO_PITCHLOCK); 1185 SVGA_FIFO_PITCHLOCK);
1573 1186
1574 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) 1187 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1575 return 0; 1188 return 0;
@@ -1641,7 +1254,9 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
1641 uint32_t pitch, 1254 uint32_t pitch,
1642 uint32_t height) 1255 uint32_t height)
1643{ 1256{
1644 return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem; 1257 return ((u64) pitch * (u64) height) < (u64)
1258 ((dev_priv->active_display_unit == vmw_du_screen_target) ?
1259 dev_priv->prim_bb_mem : dev_priv->vram_size);
1645} 1260}
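
As a rough illustration of the check above (all numbers hypothetical): a
1920x1080 mode at 32 bpp has a pitch of 1920 * 4 = 7680 bytes, so it passes
only if 7680 * 1080 = 8,294,400 bytes stays below prim_bb_mem (Screen
Targets) or vram_size (other display units).

	/* Hedged sketch: validate a hypothetical 1920x1080, 32-bpp mode. */
	u32 pitch = 1920 * 4;	/* bytes per scanline at 4 bytes/pixel */

	if (!vmw_kms_validate_mode_vram(dev_priv, pitch, 1080))
		return -EINVAL;	/* ~8.3 MB exceeds the active limit */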
1646 1261
1647 1262
@@ -1715,75 +1330,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
1715 return 0; 1330 return 0;
1716} 1331}
1717 1332
1718int vmw_du_page_flip(struct drm_crtc *crtc,
1719 struct drm_framebuffer *fb,
1720 struct drm_pending_vblank_event *event,
1721 uint32_t page_flip_flags)
1722{
1723 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
1724 struct drm_framebuffer *old_fb = crtc->primary->fb;
1725 struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
1726	struct drm_file *file_priv;
1727 struct vmw_fence_obj *fence = NULL;
1728 struct drm_clip_rect clips;
1729 int ret;
1730
1731 if (event == NULL)
1732 return -EINVAL;
1733
1734 /* require ScreenObject support for page flipping */
1735 if (!dev_priv->sou_priv)
1736 return -ENOSYS;
1737
1738 file_priv = event->base.file_priv;
1739 if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
1740 return -EINVAL;
1741
1742 crtc->primary->fb = fb;
1743
1744 /* do a full screen dirty update */
1745 clips.x1 = clips.y1 = 0;
1746 clips.x2 = fb->width;
1747 clips.y2 = fb->height;
1748
1749 if (vfb->dmabuf)
1750 ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
1751 0, 0, &clips, 1, 1, &fence);
1752 else
1753 ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
1754 0, 0, &clips, 1, 1, &fence);
1755
1756
1757 if (ret != 0)
1758 goto out_no_fence;
1759 if (!fence) {
1760 ret = -EINVAL;
1761 goto out_no_fence;
1762 }
1763
1764 ret = vmw_event_fence_action_queue(file_priv, fence,
1765 &event->base,
1766 &event->event.tv_sec,
1767 &event->event.tv_usec,
1768 true);
1769
1770 /*
1771 * No need to hold on to this now. The only cleanup
1772 * we need to do if we fail is unref the fence.
1773 */
1774 vmw_fence_obj_unreference(&fence);
1775
1776 if (vmw_crtc_to_du(crtc)->is_implicit)
1777 vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
1778
1779 return ret;
1780
1781out_no_fence:
1782 crtc->primary->fb = old_fb;
1783 return ret;
1784}
1785
1786
1787void vmw_du_crtc_save(struct drm_crtc *crtc) 1333void vmw_du_crtc_save(struct drm_crtc *crtc)
1788{ 1334{
1789} 1335}
@@ -1920,7 +1466,7 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = {
1920 * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay 1466 * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
1921 * members filled in. 1467 * members filled in.
1922 */ 1468 */
1923static void vmw_guess_mode_timing(struct drm_display_mode *mode) 1469void vmw_guess_mode_timing(struct drm_display_mode *mode)
1924{ 1470{
1925 mode->hsync_start = mode->hdisplay + 50; 1471 mode->hsync_start = mode->hdisplay + 50;
1926 mode->hsync_end = mode->hsync_start + 50; 1472 mode->hsync_end = mode->hsync_start + 50;
@@ -1955,36 +1501,39 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
1955 * If using screen objects, then assume 32-bpp because that's what the 1501 * If using screen objects, then assume 32-bpp because that's what the
1956 * SVGA device is assuming 1502 * SVGA device is assuming
1957 */ 1503 */
1958 if (dev_priv->sou_priv) 1504 if (dev_priv->active_display_unit == vmw_du_screen_object)
1959 assumed_bpp = 4; 1505 assumed_bpp = 4;
1960 1506
1507 if (dev_priv->active_display_unit == vmw_du_screen_target) {
1508 max_width = min(max_width, dev_priv->stdu_max_width);
1509 max_height = min(max_height, dev_priv->stdu_max_height);
1510 }
1511
1961 /* Add preferred mode */ 1512 /* Add preferred mode */
1962 { 1513 mode = drm_mode_duplicate(dev, &prefmode);
1963 mode = drm_mode_duplicate(dev, &prefmode); 1514 if (!mode)
1964 if (!mode) 1515 return 0;
1965 return 0; 1516 mode->hdisplay = du->pref_width;
1966 mode->hdisplay = du->pref_width; 1517 mode->vdisplay = du->pref_height;
1967 mode->vdisplay = du->pref_height; 1518 vmw_guess_mode_timing(mode);
1968 vmw_guess_mode_timing(mode);
1969
1970 if (vmw_kms_validate_mode_vram(dev_priv,
1971 mode->hdisplay * assumed_bpp,
1972 mode->vdisplay)) {
1973 drm_mode_probed_add(connector, mode);
1974 } else {
1975 drm_mode_destroy(dev, mode);
1976 mode = NULL;
1977 }
1978 1519
1979 if (du->pref_mode) { 1520 if (vmw_kms_validate_mode_vram(dev_priv,
1980 list_del_init(&du->pref_mode->head); 1521 mode->hdisplay * assumed_bpp,
1981 drm_mode_destroy(dev, du->pref_mode); 1522 mode->vdisplay)) {
1982 } 1523 drm_mode_probed_add(connector, mode);
1524 } else {
1525 drm_mode_destroy(dev, mode);
1526 mode = NULL;
1527 }
1983 1528
1984 /* mode might be null here, this is intended */ 1529 if (du->pref_mode) {
1985 du->pref_mode = mode; 1530 list_del_init(&du->pref_mode->head);
1531 drm_mode_destroy(dev, du->pref_mode);
1986 } 1532 }
1987 1533
1534 /* mode might be null here, this is intended */
1535 du->pref_mode = mode;
1536
1988 for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) { 1537 for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
1989 bmode = &vmw_kms_connector_builtin[i]; 1538 bmode = &vmw_kms_connector_builtin[i];
1990 if (bmode->hdisplay > max_width || 1539 if (bmode->hdisplay > max_width ||
@@ -2004,11 +1553,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
2004 drm_mode_probed_add(connector, mode); 1553 drm_mode_probed_add(connector, mode);
2005 } 1554 }
2006 1555
2007	/* Move the preferred mode first, help apps pick the right mode. */
2008 if (du->pref_mode)
2009 list_move(&du->pref_mode->head, &connector->probed_modes);
2010
2011 drm_mode_connector_list_update(connector, true); 1556 drm_mode_connector_list_update(connector, true);
1557 /* Move the preferred mode first, help apps pick the right mode. */
1558 drm_mode_sort(&connector->modes);
2012 1559
2013 return 1; 1560 return 1;
2014} 1561}
@@ -2032,7 +1579,9 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2032 unsigned rects_size; 1579 unsigned rects_size;
2033 int ret; 1580 int ret;
2034 int i; 1581 int i;
1582 u64 total_pixels = 0;
2035 struct drm_mode_config *mode_config = &dev->mode_config; 1583 struct drm_mode_config *mode_config = &dev->mode_config;
1584 struct drm_vmw_rect bounding_box = {0};
2036 1585
2037 if (!arg->num_outputs) { 1586 if (!arg->num_outputs) {
2038 struct drm_vmw_rect def_rect = {0, 0, 800, 600}; 1587 struct drm_vmw_rect def_rect = {0, 0, 800, 600};
@@ -2063,6 +1612,40 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2063 ret = -EINVAL; 1612 ret = -EINVAL;
2064 goto out_free; 1613 goto out_free;
2065 } 1614 }
1615
1616 /*
1617 * bounding_box.w and bunding_box.h are used as
1618 * lower-right coordinates
1619 */
1620 if (rects[i].x + rects[i].w > bounding_box.w)
1621 bounding_box.w = rects[i].x + rects[i].w;
1622
1623 if (rects[i].y + rects[i].h > bounding_box.h)
1624 bounding_box.h = rects[i].y + rects[i].h;
1625
1626 total_pixels += (u64) rects[i].w * (u64) rects[i].h;
1627 }
1628
1629 if (dev_priv->active_display_unit == vmw_du_screen_target) {
1630 /*
1631	 * For Screen Targets, the limits for a topology are:
1632 * 1. Bounding box (assuming 32bpp) must be < prim_bb_mem
1633 * 2. Total pixels (assuming 32bpp) must be < prim_bb_mem
1634 */
1635	 u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4;
1636 u64 pixel_mem = total_pixels * 4;
1637
1638 if (bb_mem > dev_priv->prim_bb_mem) {
1639 DRM_ERROR("Topology is beyond supported limits.\n");
1640 ret = -EINVAL;
1641 goto out_free;
1642 }
1643
1644 if (pixel_mem > dev_priv->prim_bb_mem) {
1645 DRM_ERROR("Combined output size too large\n");
1646 ret = -EINVAL;
1647 goto out_free;
1648 }
2066 } 1649 }
2067 1650
2068 vmw_du_update_layout(dev_priv, arg->num_outputs, rects); 1651 vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
@@ -2071,3 +1654,419 @@ out_free:
2071 kfree(rects); 1654 kfree(rects);
2072 return ret; 1655 return ret;
2073} 1656}
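
To make the two Screen Target limits concrete, here is a worked example with
made-up numbers: two 1920x1080 outputs arranged side by side.

	/*
	 * Hypothetical topology: two 1920x1080 outputs side by side.
	 * bounding_box.w = 3840, bounding_box.h = 1080
	 * bb_mem    = 3840 * 1080 * 4     = 16,588,800 bytes
	 * pixel_mem = 2 * 1920 * 1080 * 4 = 16,588,800 bytes
	 * Both values must stay below dev_priv->prim_bb_mem for the
	 * layout to be accepted.
	 */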
1657
1658/**
1659 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
1660 * on a set of cliprects and a set of display units.
1661 *
1662 * @dev_priv: Pointer to a device private structure.
1663 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
1664 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
1665 * Cliprects are given in framebuffer coordinates.
1666 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
1667 * be NULL. Cliprects are given in source coordinates.
1668 * @dest_x: X coordinate offset for the crtc / destination clip rects.
1669 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
1670 * @num_clips: Number of cliprects in the @clips or @vclips array.
1671 * @increment: Integer with which to increment the clip counter when looping.
1672 * Used to skip a predetermined number of clip rects.
1673 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
1674 */
1675int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
1676 struct vmw_framebuffer *framebuffer,
1677 const struct drm_clip_rect *clips,
1678 const struct drm_vmw_rect *vclips,
1679 s32 dest_x, s32 dest_y,
1680 int num_clips,
1681 int increment,
1682 struct vmw_kms_dirty *dirty)
1683{
1684 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
1685 struct drm_crtc *crtc;
1686 u32 num_units = 0;
1687 u32 i, k;
1688 int ret;
1689
1690 dirty->dev_priv = dev_priv;
1691
1692 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
1693 if (crtc->primary->fb != &framebuffer->base)
1694 continue;
1695 units[num_units++] = vmw_crtc_to_du(crtc);
1696 }
1697
1698 for (k = 0; k < num_units; k++) {
1699 struct vmw_display_unit *unit = units[k];
1700 s32 crtc_x = unit->crtc.x;
1701 s32 crtc_y = unit->crtc.y;
1702 s32 crtc_width = unit->crtc.mode.hdisplay;
1703 s32 crtc_height = unit->crtc.mode.vdisplay;
1704 const struct drm_clip_rect *clips_ptr = clips;
1705 const struct drm_vmw_rect *vclips_ptr = vclips;
1706
1707 dirty->unit = unit;
1708 if (dirty->fifo_reserve_size > 0) {
1709 dirty->cmd = vmw_fifo_reserve(dev_priv,
1710 dirty->fifo_reserve_size);
1711 if (!dirty->cmd) {
1712 DRM_ERROR("Couldn't reserve fifo space "
1713 "for dirty blits.\n");
1714				return -ENOMEM;
1715 }
1716 memset(dirty->cmd, 0, dirty->fifo_reserve_size);
1717 }
1718 dirty->num_hits = 0;
1719 for (i = 0; i < num_clips; i++, clips_ptr += increment,
1720 vclips_ptr += increment) {
1721 s32 clip_left;
1722 s32 clip_top;
1723
1724 /*
1725 * Select clip array type. Note that integer type
1726 * in @clips is unsigned short, whereas in @vclips
1727 * it's 32-bit.
1728 */
1729 if (clips) {
1730 dirty->fb_x = (s32) clips_ptr->x1;
1731 dirty->fb_y = (s32) clips_ptr->y1;
1732 dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
1733 crtc_x;
1734 dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
1735 crtc_y;
1736 } else {
1737 dirty->fb_x = vclips_ptr->x;
1738 dirty->fb_y = vclips_ptr->y;
1739 dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
1740 dest_x - crtc_x;
1741 dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
1742 dest_y - crtc_y;
1743 }
1744
1745 dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
1746 dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
1747
1748 /* Skip this clip if it's outside the crtc region */
1749 if (dirty->unit_x1 >= crtc_width ||
1750 dirty->unit_y1 >= crtc_height ||
1751 dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
1752 continue;
1753
1754 /* Clip right and bottom to crtc limits */
1755 dirty->unit_x2 = min_t(s32, dirty->unit_x2,
1756 crtc_width);
1757 dirty->unit_y2 = min_t(s32, dirty->unit_y2,
1758 crtc_height);
1759
1760 /* Clip left and top to crtc limits */
1761 clip_left = min_t(s32, dirty->unit_x1, 0);
1762 clip_top = min_t(s32, dirty->unit_y1, 0);
1763 dirty->unit_x1 -= clip_left;
1764 dirty->unit_y1 -= clip_top;
1765 dirty->fb_x -= clip_left;
1766 dirty->fb_y -= clip_top;
1767
1768 dirty->clip(dirty);
1769 }
1770
1771 dirty->fifo_commit(dirty);
1772 }
1773
1774 return 0;
1775}
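
A minimal caller sketch for the helper above; the command struct and both
callbacks here are hypothetical stand-ins for what a real display-unit
implementation (such as the screen object or screen target code) provides.

	/* Hypothetical per-unit blit command recorded for each cliprect. */
	struct my_blit_cmd {
		s32 left, top, right, bottom;
	};

	static void my_clip(struct vmw_kms_dirty *dirty)
	{
		struct my_blit_cmd *cmd = dirty->cmd;	/* reserved by helper */

		cmd[dirty->num_hits].left   = dirty->unit_x1;
		cmd[dirty->num_hits].top    = dirty->unit_y1;
		cmd[dirty->num_hits].right  = dirty->unit_x2;
		cmd[dirty->num_hits].bottom = dirty->unit_y2;
		dirty->num_hits++;
	}

	static void my_fifo_commit(struct vmw_kms_dirty *dirty)
	{
		/* Commit only the clips that actually hit this unit. */
		vmw_fifo_commit(dirty->dev_priv,
				sizeof(struct my_blit_cmd) * dirty->num_hits);
	}

	struct vmw_kms_dirty dirty = {
		.clip              = my_clip,
		.fifo_commit       = my_fifo_commit,
		.fifo_reserve_size = sizeof(struct my_blit_cmd) * num_clips,
	};

	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
				   0, 0, num_clips, 1, &dirty);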
1776
1777/**
1778 * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
1779 * command submission.
1780 *
1781 * @dev_priv: Pointer to a device private structure.
1782 * @buf: The buffer object
1783 * @interruptible: Whether to perform waits as interruptible.
1784 * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
1785 * the buffer will be validated as a GMR. Already pinned buffers will not be
1786 * validated.
1787 *
1788 * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
1789 * interrupted by a signal.
1790 */
1791int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
1792 struct vmw_dma_buffer *buf,
1793 bool interruptible,
1794 bool validate_as_mob)
1795{
1796 struct ttm_buffer_object *bo = &buf->base;
1797 int ret;
1798
1799 ttm_bo_reserve(bo, false, false, interruptible, NULL);
1800 ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
1801 validate_as_mob);
1802 if (ret)
1803 ttm_bo_unreserve(bo);
1804
1805 return ret;
1806}
1807
1808/**
1809 * vmw_kms_helper_buffer_revert - Undo the actions of
1810 * vmw_kms_helper_buffer_prepare.
1811 *
1812 * @buf: Pointer to the buffer object.
1813 *
1814 * Helper to be used if an error forces the caller to undo the actions of
1815 * vmw_kms_helper_buffer_prepare.
1816 */
1817void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
1818{
1819 if (buf)
1820 ttm_bo_unreserve(&buf->base);
1821}
1822
1823/**
1824 * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
1825 * kms command submission.
1826 *
1827 * @dev_priv: Pointer to a device private structure.
1828 * @file_priv: Pointer to a struct drm_file representing the caller's
1829 * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
1830 * if non-NULL, @user_fence_rep must be non-NULL.
1831 * @buf: The buffer object.
1832 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
1833 * ref-counted fence pointer is returned here.
1834 * @user_fence_rep: Optional pointer to a user-space provided struct
1835 * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
1836 * function copies fence data to user-space in a fail-safe manner.
1837 */
1838void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
1839 struct drm_file *file_priv,
1840 struct vmw_dma_buffer *buf,
1841 struct vmw_fence_obj **out_fence,
1842 struct drm_vmw_fence_rep __user *
1843 user_fence_rep)
1844{
1845 struct vmw_fence_obj *fence;
1846 uint32_t handle;
1847 int ret;
1848
1849 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
1850 file_priv ? &handle : NULL);
1851 if (buf)
1852 vmw_fence_single_bo(&buf->base, fence);
1853 if (file_priv)
1854 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
1855 ret, user_fence_rep, fence,
1856 handle);
1857 if (out_fence)
1858 *out_fence = fence;
1859 else
1860 vmw_fence_obj_unreference(&fence);
1861
1862 vmw_kms_helper_buffer_revert(buf);
1863}
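
Taken together, the buffer helpers bracket a KMS command submission. A hedged
flow sketch (submit_kms_commands() is a made-up placeholder for the actual
fifo/execbuf step):

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
	if (ret)
		return ret;	/* may be -ERESTARTSYS */

	ret = submit_kms_commands(dev_priv);	/* hypothetical */
	if (ret) {
		vmw_kms_helper_buffer_revert(buf);	/* undo the prepare */
		return ret;
	}

	/* Fence the buffer, copy fence data to user-space, unreserve. */
	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
				     user_fence_rep);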
1864
1865
1866/**
1867 * vmw_kms_helper_resource_revert - Undo the actions of
1868 * vmw_kms_helper_resource_prepare.
1869 *
1870 * @res: Pointer to the resource. Typically a surface.
1871 *
1872 * Helper to be used if an error forces the caller to undo the actions of
1873 * vmw_kms_helper_resource_prepare.
1874 */
1875void vmw_kms_helper_resource_revert(struct vmw_resource *res)
1876{
1877 vmw_kms_helper_buffer_revert(res->backup);
1878 vmw_resource_unreserve(res, false, NULL, 0);
1879 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1880}
1881
1882/**
1883 * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
1884 * command submission.
1885 *
1886 * @res: Pointer to the resource. Typically a surface.
1887 * @interruptible: Whether to perform waits as interruptible.
1888 *
1889 * Also reserves and validates the backup buffer if the resource is
1890 * guest-backed.
1890 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1891 * interrupted by a signal.
1892 */
1893int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
1894 bool interruptible)
1895{
1896 int ret = 0;
1897
1898 if (interruptible)
1899 ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
1900 else
1901 mutex_lock(&res->dev_priv->cmdbuf_mutex);
1902
1903 if (unlikely(ret != 0))
1904 return -ERESTARTSYS;
1905
1906 ret = vmw_resource_reserve(res, interruptible, false);
1907 if (ret)
1908 goto out_unlock;
1909
1910 if (res->backup) {
1911 ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
1912 interruptible,
1913 res->dev_priv->has_mob);
1914 if (ret)
1915 goto out_unreserve;
1916 }
1917 ret = vmw_resource_validate(res);
1918 if (ret)
1919 goto out_revert;
1920 return 0;
1921
1922out_revert:
1923 vmw_kms_helper_buffer_revert(res->backup);
1924out_unreserve:
1925 vmw_resource_unreserve(res, false, NULL, 0);
1926out_unlock:
1927 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1928 return ret;
1929}
1930
1931/**
1932 * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
1933 * kms command submission.
1934 *
1935 * @res: Pointer to the resource. Typically a surface.
1936 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
1937 * ref-counted fence pointer is returned here.
1938 */
1939void vmw_kms_helper_resource_finish(struct vmw_resource *res,
1940 struct vmw_fence_obj **out_fence)
1941{
1942 if (res->backup || out_fence)
1943 vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
1944 out_fence, NULL);
1945
1946 vmw_resource_unreserve(res, false, NULL, 0);
1947 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1948}
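
The resource helpers follow the same prepare/submit/finish pattern, with the
cmdbuf mutex held across the submission. A minimal sketch, assuming res is a
surface and submit_surface_commands() is a hypothetical submission step:

	ret = vmw_kms_helper_resource_prepare(res, true);
	if (ret)
		return ret;	/* -ERESTARTSYS if interrupted */

	ret = submit_surface_commands(dev_priv);	/* hypothetical */
	if (ret) {
		vmw_kms_helper_resource_revert(res);
		return ret;
	}

	vmw_kms_helper_resource_finish(res, &fence);	/* fence is optional */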
1949
1950/**
1951 * vmw_kms_update_proxy - Helper function to update a proxy surface from
1952 * its backing MOB.
1953 *
1954 * @res: Pointer to the surface resource
1955 * @clips: Clip rects in framebuffer (surface) space.
1956 * @num_clips: Number of clips in @clips.
1957 * @increment: Integer with which to increment the clip counter when looping.
1958 * Used to skip a predetermined number of clip rects.
1959 *
1960 * This function makes sure the proxy surface is updated from its backing MOB
1961 * using the region given by @clips. The surface resource @res and its backing
1962 * MOB need to be reserved and validated on call.
1963 */
1964int vmw_kms_update_proxy(struct vmw_resource *res,
1965 const struct drm_clip_rect *clips,
1966 unsigned num_clips,
1967 int increment)
1968{
1969 struct vmw_private *dev_priv = res->dev_priv;
1970 struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
1971 struct {
1972 SVGA3dCmdHeader header;
1973 SVGA3dCmdUpdateGBImage body;
1974 } *cmd;
1975 SVGA3dBox *box;
1976 size_t copy_size = 0;
1977 int i;
1978
1979 if (!clips)
1980 return 0;
1981
1982 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
1983 if (!cmd) {
1984 DRM_ERROR("Couldn't reserve fifo space for proxy surface "
1985 "update.\n");
1986 return -ENOMEM;
1987 }
1988
1989 for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
1990 box = &cmd->body.box;
1991
1992 cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
1993 cmd->header.size = sizeof(cmd->body);
1994 cmd->body.image.sid = res->id;
1995 cmd->body.image.face = 0;
1996 cmd->body.image.mipmap = 0;
1997
1998 if (clips->x1 > size->width || clips->x2 > size->width ||
1999 clips->y1 > size->height || clips->y2 > size->height) {
2000 DRM_ERROR("Invalid clips outsize of framebuffer.\n");
2001 return -EINVAL;
2002 }
2003
2004 box->x = clips->x1;
2005 box->y = clips->y1;
2006 box->z = 0;
2007 box->w = clips->x2 - clips->x1;
2008 box->h = clips->y2 - clips->y1;
2009 box->d = 1;
2010
2011 copy_size += sizeof(*cmd);
2012 }
2013
2014 vmw_fifo_commit(dev_priv, copy_size);
2015
2016 return 0;
2017}
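
A hedged call sketch for the proxy update above, using a single clip rect
covering a hypothetical full framebuffer; res and its backing MOB must
already be reserved and validated, as the comment requires:

	struct drm_clip_rect clip = {
		.x1 = 0, .y1 = 0,
		.x2 = fb_width,		/* hypothetical framebuffer size */
		.y2 = fb_height,
	};

	ret = vmw_kms_update_proxy(res, &clip, 1, 1);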
2018
2019int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
2020 unsigned unit,
2021 u32 max_width,
2022 u32 max_height,
2023 struct drm_connector **p_con,
2024 struct drm_crtc **p_crtc,
2025 struct drm_display_mode **p_mode)
2026{
2027 struct drm_connector *con;
2028 struct vmw_display_unit *du;
2029 struct drm_display_mode *mode;
2030 int i = 0;
2031
2032 list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
2033 head) {
2034 if (i == unit)
2035 break;
2036
2037 ++i;
2038 }
2039
2040 if (i != unit) {
2041 DRM_ERROR("Could not find initial display unit.\n");
2042 return -EINVAL;
2043 }
2044
2045 if (list_empty(&con->modes))
2046 (void) vmw_du_connector_fill_modes(con, max_width, max_height);
2047
2048 if (list_empty(&con->modes)) {
2049 DRM_ERROR("Could not find initial display mode.\n");
2050 return -EINVAL;
2051 }
2052
2053 du = vmw_connector_to_du(con);
2054 *p_con = con;
2055 *p_crtc = &du->crtc;
2056
2057 list_for_each_entry(mode, &con->modes, head) {
2058 if (mode->type & DRM_MODE_TYPE_PREFERRED)
2059 break;
2060 }
2061
2062 if (mode->type & DRM_MODE_TYPE_PREFERRED)
2063 *p_mode = mode;
2064 else {
2065 WARN_ONCE(true, "Could not find initial preferred mode.\n");
2066 *p_mode = list_first_entry(&con->modes,
2067 struct drm_display_mode,
2068 head);
2069 }
2070
2071 return 0;
2072}
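
A hedged fbdev-side usage sketch for the lookup helper above (the size limits
are illustrative):

	struct drm_connector *con;
	struct drm_crtc *crtc;
	struct drm_display_mode *mode;

	/* Pick display unit 0 and let the helper resolve its objects. */
	ret = vmw_kms_fbdev_init_data(dev_priv, 0, 1920, 1080,
				      &con, &crtc, &mode);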
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index f1a324cfb4c3..782df7ca9794 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,11 +32,60 @@
32#include <drm/drm_crtc_helper.h> 32#include <drm/drm_crtc_helper.h>
33#include "vmwgfx_drv.h" 33#include "vmwgfx_drv.h"
34 34
35/**
36 * struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
37 * function.
38 *
39 * @fifo_commit: Callback that is called once for each display unit after
40 * all clip rects. This function must commit the fifo space reserved by the
41 * helper. Set up by the caller.
42 * @clip: Callback that is called for each cliprect on each display unit.
43 * Set up by the caller.
44 * @fifo_reserve_size: Fifo size that the helper should try to allocate for
45 * each display unit. Set up by the caller.
46 * @dev_priv: Pointer to the device private. Set up by the helper.
47 * @unit: The current display unit. Set up by the helper before a call to @clip.
48 * @cmd: The allocated fifo space. Set up by the helper before the first @clip
49 * call.
50 * @num_hits: Number of clip rect commands for this display unit.
51 * Cleared by the helper before the first @clip call. Updated by the @clip
52 * callback.
53 * @fb_x: Clip rect left side in framebuffer coordinates.
54 * @fb_y: Clip rect top side in framebuffer coordinates.
55 * @unit_x1: Clip rect left side in crtc coordinates.
56 * @unit_y1: Clip rect top side in crtc coordinates.
57 * @unit_x2: Clip rect right side in crtc coordinates.
58 * @unit_y2: Clip rect bottom side in crtc coordinates.
59 *
60 * The clip rect coordinates are updated by the helper for each @clip call.
61 * Note that this structure may be embedded in a derived structure if more
62 * info needs to be passed between the helper caller and the helper callbacks.
63 */
64struct vmw_kms_dirty {
65 void (*fifo_commit)(struct vmw_kms_dirty *);
66 void (*clip)(struct vmw_kms_dirty *);
67 size_t fifo_reserve_size;
68 struct vmw_private *dev_priv;
69 struct vmw_display_unit *unit;
70 void *cmd;
71 u32 num_hits;
72 s32 fb_x;
73 s32 fb_y;
74 s32 unit_x1;
75 s32 unit_y1;
76 s32 unit_x2;
77 s32 unit_y2;
78};
79
35#define VMWGFX_NUM_DISPLAY_UNITS 8 80#define VMWGFX_NUM_DISPLAY_UNITS 8
36 81
37 82
38#define vmw_framebuffer_to_vfb(x) \ 83#define vmw_framebuffer_to_vfb(x) \
39 container_of(x, struct vmw_framebuffer, base) 84 container_of(x, struct vmw_framebuffer, base)
85#define vmw_framebuffer_to_vfbs(x) \
86 container_of(x, struct vmw_framebuffer_surface, base.base)
87#define vmw_framebuffer_to_vfbd(x) \
88 container_of(x, struct vmw_framebuffer_dmabuf, base.base)
40 89
41/** 90/**
42 * Base class for framebuffers 91 * Base class for framebuffers
@@ -53,9 +102,27 @@ struct vmw_framebuffer {
53 uint32_t user_handle; 102 uint32_t user_handle;
54}; 103};
55 104
105/*
106 * Clip rectangle
107 */
108struct vmw_clip_rect {
109 int x1, x2, y1, y2;
110};
111
112struct vmw_framebuffer_surface {
113 struct vmw_framebuffer base;
114 struct vmw_surface *surface;
115 struct vmw_dma_buffer *buffer;
116 struct list_head head;
117 bool is_dmabuf_proxy; /* true if this is proxy surface for DMA buf */
118};
119
120
121struct vmw_framebuffer_dmabuf {
122 struct vmw_framebuffer base;
123 struct vmw_dma_buffer *buffer;
124};
56 125
57#define vmw_crtc_to_du(x) \
58 container_of(x, struct vmw_display_unit, crtc)
59 126
60/* 127/*
61 * Basic cursor manipulation 128 * Basic cursor manipulation
@@ -120,11 +187,7 @@ struct vmw_display_unit {
120/* 187/*
121 * Shared display unit functions - vmwgfx_kms.c 188 * Shared display unit functions - vmwgfx_kms.c
122 */ 189 */
123void vmw_display_unit_cleanup(struct vmw_display_unit *du); 190void vmw_du_cleanup(struct vmw_display_unit *du);
124int vmw_du_page_flip(struct drm_crtc *crtc,
125 struct drm_framebuffer *fb,
126 struct drm_pending_vblank_event *event,
127 uint32_t page_flip_flags);
128void vmw_du_crtc_save(struct drm_crtc *crtc); 191void vmw_du_crtc_save(struct drm_crtc *crtc);
129void vmw_du_crtc_restore(struct drm_crtc *crtc); 192void vmw_du_crtc_restore(struct drm_crtc *crtc);
130void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 193void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
@@ -143,25 +206,118 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
143int vmw_du_connector_set_property(struct drm_connector *connector, 206int vmw_du_connector_set_property(struct drm_connector *connector,
144 struct drm_property *property, 207 struct drm_property *property,
145 uint64_t val); 208 uint64_t val);
209int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
210 struct vmw_framebuffer *framebuffer,
211 const struct drm_clip_rect *clips,
212 const struct drm_vmw_rect *vclips,
213 s32 dest_x, s32 dest_y,
214 int num_clips,
215 int increment,
216 struct vmw_kms_dirty *dirty);
146 217
218int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
219 struct vmw_dma_buffer *buf,
220 bool interruptible,
221 bool validate_as_mob);
222void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
223void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
224 struct drm_file *file_priv,
225 struct vmw_dma_buffer *buf,
226 struct vmw_fence_obj **out_fence,
227 struct drm_vmw_fence_rep __user *
228 user_fence_rep);
229int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
230 bool interruptible);
231void vmw_kms_helper_resource_revert(struct vmw_resource *res);
232void vmw_kms_helper_resource_finish(struct vmw_resource *res,
233 struct vmw_fence_obj **out_fence);
234int vmw_kms_readback(struct vmw_private *dev_priv,
235 struct drm_file *file_priv,
236 struct vmw_framebuffer *vfb,
237 struct drm_vmw_fence_rep __user *user_fence_rep,
238 struct drm_vmw_rect *vclips,
239 uint32_t num_clips);
240struct vmw_framebuffer *
241vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
242 struct vmw_dma_buffer *dmabuf,
243 struct vmw_surface *surface,
244 bool only_2d,
245 const struct drm_mode_fb_cmd *mode_cmd);
246int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
247 unsigned unit,
248 u32 max_width,
249 u32 max_height,
250 struct drm_connector **p_con,
251 struct drm_crtc **p_crtc,
252 struct drm_display_mode **p_mode);
253void vmw_guess_mode_timing(struct drm_display_mode *mode);
147 254
148/* 255/*
149 * Legacy display unit functions - vmwgfx_ldu.c 256 * Legacy display unit functions - vmwgfx_ldu.c
150 */ 257 */
151int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); 258int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
152int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); 259int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
260int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
261 struct vmw_framebuffer *framebuffer,
262 unsigned flags, unsigned color,
263 struct drm_clip_rect *clips,
264 unsigned num_clips, int increment);
265int vmw_kms_update_proxy(struct vmw_resource *res,
266 const struct drm_clip_rect *clips,
267 unsigned num_clips,
268 int increment);
153 269
154/* 270/*
155 * Screen Objects display functions - vmwgfx_scrn.c 271 * Screen Objects display functions - vmwgfx_scrn.c
156 */ 272 */
157int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv); 273int vmw_kms_sou_init_display(struct vmw_private *dev_priv);
158int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv); 274int vmw_kms_sou_close_display(struct vmw_private *dev_priv);
159int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num, 275int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
160 struct drm_vmw_rect *rects); 276 struct vmw_framebuffer *framebuffer,
161bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv, 277 struct drm_clip_rect *clips,
162 struct drm_crtc *crtc); 278 struct drm_vmw_rect *vclips,
163void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv, 279 struct vmw_resource *srf,
164 struct drm_crtc *crtc); 280 s32 dest_x,
281 s32 dest_y,
282 unsigned num_clips, int inc,
283 struct vmw_fence_obj **out_fence);
284int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
285 struct vmw_framebuffer *framebuffer,
286 struct drm_clip_rect *clips,
287 unsigned num_clips, int increment,
288 bool interruptible,
289 struct vmw_fence_obj **out_fence);
290int vmw_kms_sou_readback(struct vmw_private *dev_priv,
291 struct drm_file *file_priv,
292 struct vmw_framebuffer *vfb,
293 struct drm_vmw_fence_rep __user *user_fence_rep,
294 struct drm_vmw_rect *vclips,
295 uint32_t num_clips);
296
297/*
298 * Screen Target Display Unit functions - vmwgfx_stdu.c
299 */
300int vmw_kms_stdu_init_display(struct vmw_private *dev_priv);
301int vmw_kms_stdu_close_display(struct vmw_private *dev_priv);
302int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
303 struct vmw_framebuffer *framebuffer,
304 struct drm_clip_rect *clips,
305 struct drm_vmw_rect *vclips,
306 struct vmw_resource *srf,
307 s32 dest_x,
308 s32 dest_y,
309 unsigned num_clips, int inc,
310 struct vmw_fence_obj **out_fence);
311int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
312 struct drm_file *file_priv,
313 struct vmw_framebuffer *vfb,
314 struct drm_vmw_fence_rep __user *user_fence_rep,
315 struct drm_clip_rect *clips,
316 struct drm_vmw_rect *vclips,
317 uint32_t num_clips,
318 int increment,
319 bool to_surface,
320 bool interruptible);
165 321
166 322
167#endif 323#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 5c289f748ab4..bb63e4d795fa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -57,7 +57,7 @@ struct vmw_legacy_display_unit {
57static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) 57static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
58{ 58{
59 list_del_init(&ldu->active); 59 list_del_init(&ldu->active);
60 vmw_display_unit_cleanup(&ldu->base); 60 vmw_du_cleanup(&ldu->base);
61 kfree(ldu); 61 kfree(ldu);
62} 62}
63 63
@@ -279,7 +279,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
279 return -EINVAL; 279 return -EINVAL;
280 } 280 }
281 281
282 vmw_fb_off(dev_priv); 282 vmw_svga_enable(dev_priv);
283 283
284 crtc->primary->fb = fb; 284 crtc->primary->fb = fb;
285 encoder->crtc = crtc; 285 encoder->crtc = crtc;
@@ -385,7 +385,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
385 return 0; 385 return 0;
386} 386}
387 387
388int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) 388int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
389{ 389{
390 struct drm_device *dev = dev_priv->dev; 390 struct drm_device *dev = dev_priv->dev;
391 int i, ret; 391 int i, ret;
@@ -422,6 +422,10 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
422 else 422 else
423 vmw_ldu_init(dev_priv, 0); 423 vmw_ldu_init(dev_priv, 0);
424 424
425 dev_priv->active_display_unit = vmw_du_legacy;
426
427 DRM_INFO("Legacy Display Unit initialized\n");
428
425 return 0; 429 return 0;
426 430
427err_vblank_cleanup: 431err_vblank_cleanup:
@@ -432,7 +436,7 @@ err_free:
432 return ret; 436 return ret;
433} 437}
434 438
435int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) 439int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
436{ 440{
437 struct drm_device *dev = dev_priv->dev; 441 struct drm_device *dev = dev_priv->dev;
438 442
@@ -447,3 +451,38 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
447 451
448 return 0; 452 return 0;
449} 453}
454
455
456int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
457 struct vmw_framebuffer *framebuffer,
458 unsigned flags, unsigned color,
459 struct drm_clip_rect *clips,
460 unsigned num_clips, int increment)
461{
462 size_t fifo_size;
463 int i;
464
465 struct {
466 uint32_t header;
467 SVGAFifoCmdUpdate body;
468 } *cmd;
469
470 fifo_size = sizeof(*cmd) * num_clips;
471 cmd = vmw_fifo_reserve(dev_priv, fifo_size);
472 if (unlikely(cmd == NULL)) {
473 DRM_ERROR("Fifo reserve failed.\n");
474 return -ENOMEM;
475 }
476
477 memset(cmd, 0, fifo_size);
478 for (i = 0; i < num_clips; i++, clips += increment) {
479 cmd[i].header = SVGA_CMD_UPDATE;
480 cmd[i].body.x = clips->x1;
481 cmd[i].body.y = clips->y1;
482 cmd[i].body.width = clips->x2 - clips->x1;
483 cmd[i].body.height = clips->y2 - clips->y1;
484 }
485
486 vmw_fifo_commit(dev_priv, fifo_size);
487 return 0;
488}
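
A hedged usage sketch for the legacy dirty path above, marking one
full-screen region dirty (the extents are made up):

	/* Hypothetical full-screen update through the legacy display unit. */
	struct drm_clip_rect clip = {
		.x1 = 0, .y1 = 0,
		.x2 = 1024, .y2 = 768,
	};

	ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base,
					  0, 0, &clip, 1, 1);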
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 04a64b8cd3cd..23db16008e39 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,7 +31,7 @@
31 * If we set up the screen target otable, screen objects stop working. 31 * If we set up the screen target otable, screen objects stop working.
32 */ 32 */
33 33
34#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1) 34#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))
35 35
36#ifdef CONFIG_64BIT 36#ifdef CONFIG_64BIT
37#define VMW_PPN_SIZE 8 37#define VMW_PPN_SIZE 8
@@ -67,9 +67,23 @@ struct vmw_mob {
67 * @size: Size of the table (page-aligned). 67 * @size: Size of the table (page-aligned).
68 * @page_table: Pointer to a struct vmw_mob holding the page table. 68 * @page_table: Pointer to a struct vmw_mob holding the page table.
69 */ 69 */
70struct vmw_otable { 70static const struct vmw_otable pre_dx_tables[] = {
71 unsigned long size; 71 {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
72 struct vmw_mob *page_table; 72 {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
73 {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
74 {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
75 {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
76 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
77};
78
79static const struct vmw_otable dx_tables[] = {
80 {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
81 {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
82 {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
83 {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
84 {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
85 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
86 {VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
73}; 87};
74 88
75static int vmw_mob_pt_populate(struct vmw_private *dev_priv, 89static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
@@ -92,6 +106,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
92 */ 106 */
93static int vmw_setup_otable_base(struct vmw_private *dev_priv, 107static int vmw_setup_otable_base(struct vmw_private *dev_priv,
94 SVGAOTableType type, 108 SVGAOTableType type,
109 struct ttm_buffer_object *otable_bo,
95 unsigned long offset, 110 unsigned long offset,
96 struct vmw_otable *otable) 111 struct vmw_otable *otable)
97{ 112{
@@ -106,7 +121,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
106 121
107 BUG_ON(otable->page_table != NULL); 122 BUG_ON(otable->page_table != NULL);
108 123
109 vsgt = vmw_bo_sg_table(dev_priv->otable_bo); 124 vsgt = vmw_bo_sg_table(otable_bo);
110 vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); 125 vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
111 WARN_ON(!vmw_piter_next(&iter)); 126 WARN_ON(!vmw_piter_next(&iter));
112 127
@@ -142,7 +157,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
142 cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64; 157 cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
143 cmd->header.size = sizeof(cmd->body); 158 cmd->header.size = sizeof(cmd->body);
144 cmd->body.type = type; 159 cmd->body.type = type;
145 cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT); 160 cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
146 cmd->body.sizeInBytes = otable->size; 161 cmd->body.sizeInBytes = otable->size;
147 cmd->body.validSizeInBytes = 0; 162 cmd->body.validSizeInBytes = 0;
148 cmd->body.ptDepth = mob->pt_level; 163 cmd->body.ptDepth = mob->pt_level;
@@ -191,18 +206,19 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
191 if (unlikely(cmd == NULL)) { 206 if (unlikely(cmd == NULL)) {
192 DRM_ERROR("Failed reserving FIFO space for OTable " 207 DRM_ERROR("Failed reserving FIFO space for OTable "
193 "takedown.\n"); 208 "takedown.\n");
194 } else { 209 return;
195 memset(cmd, 0, sizeof(*cmd));
196 cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
197 cmd->header.size = sizeof(cmd->body);
198 cmd->body.type = type;
199 cmd->body.baseAddress = 0;
200 cmd->body.sizeInBytes = 0;
201 cmd->body.validSizeInBytes = 0;
202 cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
203 vmw_fifo_commit(dev_priv, sizeof(*cmd));
204 } 210 }
205 211
212 memset(cmd, 0, sizeof(*cmd));
213 cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
214 cmd->header.size = sizeof(cmd->body);
215 cmd->body.type = type;
216 cmd->body.baseAddress = 0;
217 cmd->body.sizeInBytes = 0;
218 cmd->body.validSizeInBytes = 0;
219 cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
220 vmw_fifo_commit(dev_priv, sizeof(*cmd));
221
206 if (bo) { 222 if (bo) {
207 int ret; 223 int ret;
208 224
@@ -217,47 +233,21 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
217 otable->page_table = NULL; 233 otable->page_table = NULL;
218} 234}
219 235
220/* 236
221 * vmw_otables_setup - Set up guest backed memory object tables 237static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
222 * 238 struct vmw_otable_batch *batch)
223 * @dev_priv: Pointer to a device private structure
224 *
225 * Takes care of the device guest backed surface
226 * initialization, by setting up the guest backed memory object tables.
227 * Returns 0 on success and various error codes on failure. A succesful return
228 * means the object tables can be taken down using the vmw_otables_takedown
229 * function.
230 */
231int vmw_otables_setup(struct vmw_private *dev_priv)
232{ 239{
233 unsigned long offset; 240 unsigned long offset;
234 unsigned long bo_size; 241 unsigned long bo_size;
235 struct vmw_otable *otables; 242 struct vmw_otable *otables = batch->otables;
236 SVGAOTableType i; 243 SVGAOTableType i;
237 int ret; 244 int ret;
238 245
239 otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
240 GFP_KERNEL);
241 if (unlikely(otables == NULL)) {
242 DRM_ERROR("Failed to allocate space for otable "
243 "metadata.\n");
244 return -ENOMEM;
245 }
246
247 otables[SVGA_OTABLE_MOB].size =
248 VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
249 otables[SVGA_OTABLE_SURFACE].size =
250 VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
251 otables[SVGA_OTABLE_CONTEXT].size =
252 VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
253 otables[SVGA_OTABLE_SHADER].size =
254 VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
255 otables[SVGA_OTABLE_SCREEN_TARGET].size =
256 VMWGFX_NUM_GB_SCREEN_TARGET *
257 SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
258
259 bo_size = 0; 246 bo_size = 0;
260 for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) { 247 for (i = 0; i < batch->num_otables; ++i) {
248 if (!otables[i].enabled)
249 continue;
250
261 otables[i].size = 251 otables[i].size =
262 (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; 252 (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
263 bo_size += otables[i].size; 253 bo_size += otables[i].size;
@@ -267,63 +257,105 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
267 ttm_bo_type_device, 257 ttm_bo_type_device,
268 &vmw_sys_ne_placement, 258 &vmw_sys_ne_placement,
269 0, false, NULL, 259 0, false, NULL,
270 &dev_priv->otable_bo); 260 &batch->otable_bo);
271 261
272 if (unlikely(ret != 0)) 262 if (unlikely(ret != 0))
273 goto out_no_bo; 263 goto out_no_bo;
274 264
275 ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL); 265 ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
276 BUG_ON(ret != 0); 266 BUG_ON(ret != 0);
277 ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm); 267 ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
278 if (unlikely(ret != 0)) 268 if (unlikely(ret != 0))
279 goto out_unreserve; 269 goto out_unreserve;
280 ret = vmw_bo_map_dma(dev_priv->otable_bo); 270 ret = vmw_bo_map_dma(batch->otable_bo);
281 if (unlikely(ret != 0)) 271 if (unlikely(ret != 0))
282 goto out_unreserve; 272 goto out_unreserve;
283 273
284 ttm_bo_unreserve(dev_priv->otable_bo); 274 ttm_bo_unreserve(batch->otable_bo);
285 275
286 offset = 0; 276 offset = 0;
287 for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) { 277 for (i = 0; i < batch->num_otables; ++i) {
288 ret = vmw_setup_otable_base(dev_priv, i, offset, 278 if (!batch->otables[i].enabled)
279 continue;
280
281 ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
282 offset,
289 &otables[i]); 283 &otables[i]);
290 if (unlikely(ret != 0)) 284 if (unlikely(ret != 0))
291 goto out_no_setup; 285 goto out_no_setup;
292 offset += otables[i].size; 286 offset += otables[i].size;
293 } 287 }
294 288
295 dev_priv->otables = otables;
296 return 0; 289 return 0;
297 290
298out_unreserve: 291out_unreserve:
299 ttm_bo_unreserve(dev_priv->otable_bo); 292 ttm_bo_unreserve(batch->otable_bo);
300out_no_setup: 293out_no_setup:
301 for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) 294 for (i = 0; i < batch->num_otables; ++i) {
302 vmw_takedown_otable_base(dev_priv, i, &otables[i]); 295 if (batch->otables[i].enabled)
296 vmw_takedown_otable_base(dev_priv, i,
297 &batch->otables[i]);
298 }
303 299
304 ttm_bo_unref(&dev_priv->otable_bo); 300 ttm_bo_unref(&batch->otable_bo);
305out_no_bo: 301out_no_bo:
306 kfree(otables);
307 return ret; 302 return ret;
308} 303}
309 304
310
311/* 305/*
312 * vmw_otables_takedown - Take down guest backed memory object tables 306 * vmw_otables_setup - Set up guest backed memory object tables
313 * 307 *
314 * @dev_priv: Pointer to a device private structure 308 * @dev_priv: Pointer to a device private structure
315 * 309 *
316 * Take down the Guest Memory Object tables. 310 * Takes care of the device guest backed surface
311 * initialization, by setting up the guest backed memory object tables.
312 * Returns 0 on success and various error codes on failure. A successful return
313 * means the object tables can be taken down using the vmw_otables_takedown
314 * function.
317 */ 315 */
318void vmw_otables_takedown(struct vmw_private *dev_priv) 316int vmw_otables_setup(struct vmw_private *dev_priv)
317{
318 struct vmw_otable **otables = &dev_priv->otable_batch.otables;
319 int ret;
320
321 if (dev_priv->has_dx) {
322 *otables = kmalloc(sizeof(dx_tables), GFP_KERNEL);
323 if (*otables == NULL)
324 return -ENOMEM;
325
326 memcpy(*otables, dx_tables, sizeof(dx_tables));
327 dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
328 } else {
329 *otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL);
330 if (*otables == NULL)
331 return -ENOMEM;
332
333 memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables));
334 dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
335 }
336
337 ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
338 if (unlikely(ret != 0))
339 goto out_setup;
340
341 return 0;
342
343out_setup:
344 kfree(*otables);
345 return ret;
346}
347
348static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
349 struct vmw_otable_batch *batch)
319{ 350{
320 SVGAOTableType i; 351 SVGAOTableType i;
321 struct ttm_buffer_object *bo = dev_priv->otable_bo; 352 struct ttm_buffer_object *bo = batch->otable_bo;
322 int ret; 353 int ret;
323 354
324 for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) 355 for (i = 0; i < batch->num_otables; ++i)
325 vmw_takedown_otable_base(dev_priv, i, 356 if (batch->otables[i].enabled)
326 &dev_priv->otables[i]); 357 vmw_takedown_otable_base(dev_priv, i,
358 &batch->otables[i]);
327 359
328 ret = ttm_bo_reserve(bo, false, true, false, NULL); 360 ret = ttm_bo_reserve(bo, false, true, false, NULL);
329 BUG_ON(ret != 0); 361 BUG_ON(ret != 0);
@@ -331,11 +363,21 @@ void vmw_otables_takedown(struct vmw_private *dev_priv)
331 vmw_fence_single_bo(bo, NULL); 363 vmw_fence_single_bo(bo, NULL);
332 ttm_bo_unreserve(bo); 364 ttm_bo_unreserve(bo);
333 365
334 ttm_bo_unref(&dev_priv->otable_bo); 366 ttm_bo_unref(&batch->otable_bo);
335 kfree(dev_priv->otables);
336 dev_priv->otables = NULL;
337} 367}
338 368
369/*
370 * vmw_otables_takedown - Take down guest backed memory object tables
371 *
372 * @dev_priv: Pointer to a device private structure
373 *
374 * Take down the Guest Memory Object tables.
375 */
376void vmw_otables_takedown(struct vmw_private *dev_priv)
377{
378 vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
379 kfree(dev_priv->otable_batch.otables);
380}
339 381
340/* 382/*
341 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages 383 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
@@ -409,7 +451,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
409 goto out_unreserve; 451 goto out_unreserve;
410 452
411 ttm_bo_unreserve(mob->pt_bo); 453 ttm_bo_unreserve(mob->pt_bo);
412 454
413 return 0; 455 return 0;
414 456
415out_unreserve: 457out_unreserve:
@@ -429,15 +471,15 @@ out_unreserve:
429 * *@addr according to the page table entry size. 471 * *@addr according to the page table entry size.
430 */ 472 */
431#if (VMW_PPN_SIZE == 8) 473#if (VMW_PPN_SIZE == 8)
432static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val) 474static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
433{ 475{
434 *((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT); 476 *((u64 *) *addr) = val >> PAGE_SHIFT;
435 *addr += 2; 477 *addr += 2;
436} 478}
437#else 479#else
438static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val) 480static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
439{ 481{
440 *(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT); 482 *(*addr)++ = val >> PAGE_SHIFT;
441} 483}
442#endif 484#endif
443 485
@@ -459,7 +501,7 @@ static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
459 unsigned long pt_size = num_data_pages * VMW_PPN_SIZE; 501 unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
460 unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE); 502 unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
461 unsigned long pt_page; 503 unsigned long pt_page;
462 __le32 *addr, *save_addr; 504 u32 *addr, *save_addr;
463 unsigned long i; 505 unsigned long i;
464 struct page *page; 506 struct page *page;
465 507
@@ -574,7 +616,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
574 vmw_fence_single_bo(bo, NULL); 616 vmw_fence_single_bo(bo, NULL);
575 ttm_bo_unreserve(bo); 617 ttm_bo_unreserve(bo);
576 } 618 }
577 vmw_3d_resource_dec(dev_priv, false); 619 vmw_fifo_resource_dec(dev_priv);
578} 620}
579 621
580/* 622/*
@@ -627,7 +669,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
627 mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; 669 mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
628 } 670 }
629 671
630 (void) vmw_3d_resource_inc(dev_priv, false); 672 vmw_fifo_resource_inc(dev_priv);
631 673
632 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 674 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
633 if (unlikely(cmd == NULL)) { 675 if (unlikely(cmd == NULL)) {
@@ -640,7 +682,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
640 cmd->header.size = sizeof(cmd->body); 682 cmd->header.size = sizeof(cmd->body);
641 cmd->body.mobid = mob_id; 683 cmd->body.mobid = mob_id;
642 cmd->body.ptDepth = mob->pt_level; 684 cmd->body.ptDepth = mob->pt_level;
643 cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT); 685 cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
644 cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE; 686 cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
645 687
646 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 688 vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -648,7 +690,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
648 return 0; 690 return 0;
649 691
650out_no_cmd_space: 692out_no_cmd_space:
651 vmw_3d_resource_dec(dev_priv, false); 693 vmw_fifo_resource_dec(dev_priv);
652 if (pt_set_up) 694 if (pt_set_up)
653 ttm_bo_unref(&mob->pt_bo); 695 ttm_bo_unref(&mob->pt_bo);
654 696
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 87e39f68e9d0..76069f093ccf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,8 +31,8 @@
31 31
32#include <drm/ttm/ttm_placement.h> 32#include <drm/ttm/ttm_placement.h>
33 33
34#include "svga_overlay.h" 34#include "device_include/svga_overlay.h"
35#include "svga_escape.h" 35#include "device_include/svga_escape.h"
36 36
37#define VMW_MAX_NUM_STREAMS 1 37#define VMW_MAX_NUM_STREAMS 1
38#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE) 38#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
@@ -100,7 +100,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
100{ 100{
101 struct vmw_escape_video_flush *flush; 101 struct vmw_escape_video_flush *flush;
102 size_t fifo_size; 102 size_t fifo_size;
103 bool have_so = dev_priv->sou_priv ? true : false; 103 bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
104 int i, num_items; 104 int i, num_items;
105 SVGAGuestPtr ptr; 105 SVGAGuestPtr ptr;
106 106
@@ -231,10 +231,10 @@ static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
231 if (!pin) 231 if (!pin)
232 return vmw_dmabuf_unpin(dev_priv, buf, inter); 232 return vmw_dmabuf_unpin(dev_priv, buf, inter);
233 233
234 if (!dev_priv->sou_priv) 234 if (dev_priv->active_display_unit == vmw_du_legacy)
235 return vmw_dmabuf_to_vram(dev_priv, buf, true, inter); 235 return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
236 236
237 return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter); 237 return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
238} 238}
239 239
240/** 240/**
@@ -453,7 +453,7 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv)
453 453
454static bool vmw_overlay_available(const struct vmw_private *dev_priv) 454static bool vmw_overlay_available(const struct vmw_private *dev_priv)
455{ 455{
456 return (dev_priv->overlay_priv != NULL && 456 return (dev_priv->overlay_priv != NULL &&
457 ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) == 457 ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
458 VMW_OVERLAY_CAP_MASK)); 458 VMW_OVERLAY_CAP_MASK));
459} 459}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
index 9d0dd3a342eb..dce798053a96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,19 +39,17 @@
39#define VMWGFX_IRQSTATUS_PORT 0x8 39#define VMWGFX_IRQSTATUS_PORT 0x8
40 40
41struct svga_guest_mem_descriptor { 41struct svga_guest_mem_descriptor {
42 __le32 ppn; 42 u32 ppn;
43 __le32 num_pages; 43 u32 num_pages;
44}; 44};
45 45
46struct svga_fifo_cmd_fence { 46struct svga_fifo_cmd_fence {
47 __le32 fence; 47 u32 fence;
48}; 48};
49 49
50#define SVGA_SYNC_GENERIC 1 50#define SVGA_SYNC_GENERIC 1
51#define SVGA_SYNC_FIFOFULL 2 51#define SVGA_SYNC_FIFOFULL 2
52 52
53#include "svga_types.h" 53#include "device_include/svga3d_reg.h"
54
55#include "svga3d_reg.h"
56 54
57#endif 55#endif
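Dropping the __le32 annotations (and the cpu_to_le64() call in vmw_mob_bind() above) reflects that the SVGA device and its supported guests are little-endian, so plain u32 suffices in the shared device headers. For illustration only, this is the guarantee the annotations used to document, written as an explicit little-endian store in portable C:

#include <stdint.h>
#include <stdio.h>

/* Store a host u32 into device memory in little-endian byte order,
 * which is what cpu_to_le32() guarantees on any host. */
static void put_le32(uint8_t *dst, uint32_t v)
{
	dst[0] = (uint8_t)(v & 0xff);
	dst[1] = (uint8_t)((v >> 8) & 0xff);
	dst[2] = (uint8_t)((v >> 16) & 0xff);
	dst[3] = (uint8_t)((v >> 24) & 0xff);
}

int main(void)
{
	uint8_t reg[4];

	put_le32(reg, 0x12345678);
	printf("%02x %02x %02x %02x\n", reg[0], reg[1], reg[2], reg[3]);
	return 0; /* prints 78 56 34 12 on any host */
}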
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 210ef15b1d09..c1912f852b42 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,6 +31,7 @@
31#include <drm/ttm/ttm_placement.h> 31#include <drm/ttm/ttm_placement.h>
32#include <drm/drmP.h> 32#include <drm/drmP.h>
33#include "vmwgfx_resource_priv.h" 33#include "vmwgfx_resource_priv.h"
34#include "vmwgfx_binding.h"
34 35
35#define VMW_RES_EVICT_ERR_COUNT 10 36#define VMW_RES_EVICT_ERR_COUNT 10
36 37
@@ -121,6 +122,7 @@ static void vmw_resource_release(struct kref *kref)
121 int id; 122 int id;
122 struct idr *idr = &dev_priv->res_idr[res->func->res_type]; 123 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
123 124
125 write_lock(&dev_priv->resource_lock);
124 res->avail = false; 126 res->avail = false;
125 list_del_init(&res->lru_head); 127 list_del_init(&res->lru_head);
126 write_unlock(&dev_priv->resource_lock); 128 write_unlock(&dev_priv->resource_lock);
@@ -143,10 +145,10 @@ static void vmw_resource_release(struct kref *kref)
143 } 145 }
144 146
145 if (likely(res->hw_destroy != NULL)) { 147 if (likely(res->hw_destroy != NULL)) {
146 res->hw_destroy(res);
147 mutex_lock(&dev_priv->binding_mutex); 148 mutex_lock(&dev_priv->binding_mutex);
148 vmw_context_binding_res_list_kill(&res->binding_head); 149 vmw_binding_res_list_kill(&res->binding_head);
149 mutex_unlock(&dev_priv->binding_mutex); 150 mutex_unlock(&dev_priv->binding_mutex);
151 res->hw_destroy(res);
150 } 152 }
151 153
152 id = res->id; 154 id = res->id;
@@ -156,20 +158,17 @@ static void vmw_resource_release(struct kref *kref)
156 kfree(res); 158 kfree(res);
157 159
158 write_lock(&dev_priv->resource_lock); 160 write_lock(&dev_priv->resource_lock);
159
160 if (id != -1) 161 if (id != -1)
161 idr_remove(idr, id); 162 idr_remove(idr, id);
163 write_unlock(&dev_priv->resource_lock);
162} 164}
163 165
164void vmw_resource_unreference(struct vmw_resource **p_res) 166void vmw_resource_unreference(struct vmw_resource **p_res)
165{ 167{
166 struct vmw_resource *res = *p_res; 168 struct vmw_resource *res = *p_res;
167 struct vmw_private *dev_priv = res->dev_priv;
168 169
169 *p_res = NULL; 170 *p_res = NULL;
170 write_lock(&dev_priv->resource_lock);
171 kref_put(&res->kref, vmw_resource_release); 171 kref_put(&res->kref, vmw_resource_release);
172 write_unlock(&dev_priv->resource_lock);
173} 172}
174 173
175 174
@@ -260,17 +259,16 @@ void vmw_resource_activate(struct vmw_resource *res,
260 write_unlock(&dev_priv->resource_lock); 259 write_unlock(&dev_priv->resource_lock);
261} 260}
262 261
263struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv, 262static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
264 struct idr *idr, int id) 263 struct idr *idr, int id)
265{ 264{
266 struct vmw_resource *res; 265 struct vmw_resource *res;
267 266
268 read_lock(&dev_priv->resource_lock); 267 read_lock(&dev_priv->resource_lock);
269 res = idr_find(idr, id); 268 res = idr_find(idr, id);
270 if (res && res->avail) 269 if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
271 kref_get(&res->kref);
272 else
273 res = NULL; 270 res = NULL;
271
274 read_unlock(&dev_priv->resource_lock); 272 read_unlock(&dev_priv->resource_lock);
275 273
276 if (unlikely(res == NULL)) 274 if (unlikely(res == NULL))
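vmw_resource_lookup() now refuses to resurrect a dying resource: with vmw_resource_release() taking resource_lock itself, a lookup can race with the final kref_put(), so the get must fail once the count has already hit zero. A small self-contained sketch of the kref_get_unless_zero() semantics using C11 atomics (illustrative, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
	atomic_uint refcount;
};

/* Take a reference only if the object is still live (refcount > 0). */
static bool get_unless_zero(struct object *obj)
{
	unsigned old = atomic_load(&obj->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&obj->refcount, &old,
						 old + 1))
			return true;  /* reference taken */
		/* old was reloaded by the failed CAS; retry */
	}
	return false;  /* already dying; caller treats it as not found */
}

int main(void)
{
	struct object live = { .refcount = 1 };
	struct object dead = { .refcount = 0 };

	printf("live: %d, dead: %d\n",
	       get_unless_zero(&live), get_unless_zero(&dead));
	return 0;
}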
@@ -900,20 +898,21 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
900 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), 898 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
901 vmw_user_stream_size, 899 vmw_user_stream_size,
902 false, true); 900 false, true);
901 ttm_read_unlock(&dev_priv->reservation_sem);
903 if (unlikely(ret != 0)) { 902 if (unlikely(ret != 0)) {
904 if (ret != -ERESTARTSYS) 903 if (ret != -ERESTARTSYS)
905 DRM_ERROR("Out of graphics memory for stream" 904 DRM_ERROR("Out of graphics memory for stream"
906 " creation.\n"); 905 " creation.\n");
907 goto out_unlock;
908 }
909 906
907 goto out_ret;
908 }
910 909
911 stream = kmalloc(sizeof(*stream), GFP_KERNEL); 910 stream = kmalloc(sizeof(*stream), GFP_KERNEL);
912 if (unlikely(stream == NULL)) { 911 if (unlikely(stream == NULL)) {
913 ttm_mem_global_free(vmw_mem_glob(dev_priv), 912 ttm_mem_global_free(vmw_mem_glob(dev_priv),
914 vmw_user_stream_size); 913 vmw_user_stream_size);
915 ret = -ENOMEM; 914 ret = -ENOMEM;
916 goto out_unlock; 915 goto out_ret;
917 } 916 }
918 917
919 res = &stream->stream.res; 918 res = &stream->stream.res;
@@ -926,7 +925,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
926 925
927 ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free); 926 ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
928 if (unlikely(ret != 0)) 927 if (unlikely(ret != 0))
929 goto out_unlock; 928 goto out_ret;
930 929
931 tmp = vmw_resource_reference(res); 930 tmp = vmw_resource_reference(res);
932 ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM, 931 ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
@@ -940,8 +939,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
940 arg->stream_id = res->id; 939 arg->stream_id = res->id;
941out_err: 940out_err:
942 vmw_resource_unreference(&res); 941 vmw_resource_unreference(&res);
943out_unlock: 942out_ret:
944 ttm_read_unlock(&dev_priv->reservation_sem);
945 return ret; 943 return ret;
946} 944}
947 945
@@ -1152,14 +1150,16 @@ out_bind_failed:
1152 * command submission. 1150 * command submission.
1153 * 1151 *
1154 * @res: Pointer to the struct vmw_resource to unreserve. 1152 * @res: Pointer to the struct vmw_resource to unreserve.
1153 * @switch_backup: Backup buffer has been switched.
1155 * @new_backup: Pointer to new backup buffer if command submission 1154 * @new_backup: Pointer to new backup buffer if command submission
1156 * switched. 1155 * switched. May be NULL.
1157 * @new_backup_offset: New backup offset if @new_backup is !NULL. 1156 * @new_backup_offset: New backup offset if @switch_backup is true.
1158 * 1157 *
1159 * Currently unreserving a resource means putting it back on the device's 1158 * Currently unreserving a resource means putting it back on the device's
1160 * resource lru list, so that it can be evicted if necessary. 1159 * resource lru list, so that it can be evicted if necessary.
1161 */ 1160 */
1162void vmw_resource_unreserve(struct vmw_resource *res, 1161void vmw_resource_unreserve(struct vmw_resource *res,
1162 bool switch_backup,
1163 struct vmw_dma_buffer *new_backup, 1163 struct vmw_dma_buffer *new_backup,
1164 unsigned long new_backup_offset) 1164 unsigned long new_backup_offset)
1165{ 1165{
@@ -1168,22 +1168,25 @@ void vmw_resource_unreserve(struct vmw_resource *res,
1168 if (!list_empty(&res->lru_head)) 1168 if (!list_empty(&res->lru_head))
1169 return; 1169 return;
1170 1170
1171 if (new_backup && new_backup != res->backup) { 1171 if (switch_backup && new_backup != res->backup) {
1172
1173 if (res->backup) { 1172 if (res->backup) {
1174 lockdep_assert_held(&res->backup->base.resv->lock.base); 1173 lockdep_assert_held(&res->backup->base.resv->lock.base);
1175 list_del_init(&res->mob_head); 1174 list_del_init(&res->mob_head);
1176 vmw_dmabuf_unreference(&res->backup); 1175 vmw_dmabuf_unreference(&res->backup);
1177 } 1176 }
1178 1177
1179 res->backup = vmw_dmabuf_reference(new_backup); 1178 if (new_backup) {
1180 lockdep_assert_held(&new_backup->base.resv->lock.base); 1179 res->backup = vmw_dmabuf_reference(new_backup);
1181 list_add_tail(&res->mob_head, &new_backup->res_list); 1180 lockdep_assert_held(&new_backup->base.resv->lock.base);
1181 list_add_tail(&res->mob_head, &new_backup->res_list);
1182 } else {
1183 res->backup = NULL;
1184 }
1182 } 1185 }
1183 if (new_backup) 1186 if (switch_backup)
1184 res->backup_offset = new_backup_offset; 1187 res->backup_offset = new_backup_offset;
1185 1188
1186 if (!res->func->may_evict || res->id == -1) 1189 if (!res->func->may_evict || res->id == -1 || res->pin_count)
1187 return; 1190 return;
1188 1191
1189 write_lock(&dev_priv->resource_lock); 1192 write_lock(&dev_priv->resource_lock);
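With the new @switch_backup argument, vmw_resource_unreserve() now distinguishes three cases: no backup change, switching to a different buffer, and detaching the buffer entirely (switch_backup true with new_backup NULL). A condensed sketch of that decision, with reference counting and locking elided (simplified types, not driver code):

#include <stddef.h>
#include <stdio.h>

struct buffer { int id; };
struct resource {
	struct buffer *backup;
	unsigned long backup_offset;
};

static void unreserve(struct resource *res, int switch_backup,
		      struct buffer *new_backup, unsigned long offset)
{
	if (switch_backup && new_backup != res->backup) {
		/* drop the old buffer, if any, then ... */
		res->backup = new_backup;  /* may be NULL: detach entirely */
	}
	if (switch_backup)
		res->backup_offset = offset;
}

int main(void)
{
	struct buffer a = { 1 }, b = { 2 };
	struct resource res = { &a, 0 };

	unreserve(&res, 1, &b, 4096);  /* switch to a new backup */
	unreserve(&res, 0, NULL, 0);   /* no change requested */
	unreserve(&res, 1, NULL, 0);   /* detach the backup buffer */
	printf("backup: %p\n", (void *)res.backup);
	return 0;
}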
@@ -1259,7 +1262,8 @@ out_no_reserve:
1259 * the buffer may not be bound to the resource at this point. 1262 * the buffer may not be bound to the resource at this point.
1260 * 1263 *
1261 */ 1264 */
1262int vmw_resource_reserve(struct vmw_resource *res, bool no_backup) 1265int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
1266 bool no_backup)
1263{ 1267{
1264 struct vmw_private *dev_priv = res->dev_priv; 1268 struct vmw_private *dev_priv = res->dev_priv;
1265 int ret; 1269 int ret;
@@ -1270,9 +1274,13 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
1270 1274
1271 if (res->func->needs_backup && res->backup == NULL && 1275 if (res->func->needs_backup && res->backup == NULL &&
1272 !no_backup) { 1276 !no_backup) {
1273 ret = vmw_resource_buf_alloc(res, true); 1277 ret = vmw_resource_buf_alloc(res, interruptible);
1274 if (unlikely(ret != 0)) 1278 if (unlikely(ret != 0)) {
1279 DRM_ERROR("Failed to allocate a backup buffer "
1280 "of size %lu. bytes\n",
1281 (unsigned long) res->backup_size);
1275 return ret; 1282 return ret;
1283 }
1276 } 1284 }
1277 1285
1278 return 0; 1286 return 0;
@@ -1305,7 +1313,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
1305 * @res: The resource to evict. 1313 * @res: The resource to evict.
1306 * @interruptible: Whether to wait interruptible. 1314 * @interruptible: Whether to wait interruptible.
1307 */ 1315 */
1308int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) 1316static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1309{ 1317{
1310 struct ttm_validate_buffer val_buf; 1318 struct ttm_validate_buffer val_buf;
1311 const struct vmw_res_func *func = res->func; 1319 const struct vmw_res_func *func = res->func;
@@ -1356,7 +1364,7 @@ int vmw_resource_validate(struct vmw_resource *res)
1356 struct ttm_validate_buffer val_buf; 1364 struct ttm_validate_buffer val_buf;
1357 unsigned err_count = 0; 1365 unsigned err_count = 0;
1358 1366
1359 if (likely(!res->func->may_evict)) 1367 if (!res->func->create)
1360 return 0; 1368 return 0;
1361 1369
1362 val_buf.bo = NULL; 1370 val_buf.bo = NULL;
@@ -1443,9 +1451,9 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
1443/** 1451/**
1444 * vmw_resource_move_notify - TTM move_notify_callback 1452 * vmw_resource_move_notify - TTM move_notify_callback
1445 * 1453 *
1446 * @bo: The TTM buffer object about to move. 1454 * @bo: The TTM buffer object about to move.
1447 * @mem: The truct ttm_mem_reg indicating to what memory 1455 * @mem: The struct ttm_mem_reg indicating to what memory
1448 * region the move is taking place. 1456 * region the move is taking place.
1449 * 1457 *
1450 * Evicts the Guest Backed hardware resource if the backup 1458 * Evicts the Guest Backed hardware resource if the backup
1451 * buffer is being moved out of MOB memory. 1459 * buffer is being moved out of MOB memory.
@@ -1495,6 +1503,101 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1495 } 1503 }
1496} 1504}
1497 1505
1506
1507
1508/**
1509 * vmw_query_readback_all - Read back cached query states
1510 *
1511 * @dx_query_mob: Buffer containing the DX query MOB
1512 *
1513 * Read back cached states from the device if they exist. This function
1514 * assumes that binding_mutex is held.
1515 */
1516int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
1517{
1518 struct vmw_resource *dx_query_ctx;
1519 struct vmw_private *dev_priv;
1520 struct {
1521 SVGA3dCmdHeader header;
1522 SVGA3dCmdDXReadbackAllQuery body;
1523 } *cmd;
1524
1525
1526 /* No query bound, so do nothing */
1527 if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
1528 return 0;
1529
1530 dx_query_ctx = dx_query_mob->dx_query_ctx;
1531 dev_priv = dx_query_ctx->dev_priv;
1532
1533 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
1534 if (unlikely(cmd == NULL)) {
1535 DRM_ERROR("Failed reserving FIFO space for "
1536 "query MOB read back.\n");
1537 return -ENOMEM;
1538 }
1539
1540 cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
1541 cmd->header.size = sizeof(cmd->body);
1542 cmd->body.cid = dx_query_ctx->id;
1543
1544 vmw_fifo_commit(dev_priv, sizeof(*cmd));
1545
1546 /* Triggers a rebind the next time the affected context is bound */
1547 dx_query_mob->dx_query_ctx = NULL;
1548
1549 return 0;
1550}
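vmw_query_readback_all() follows the driver's usual command pattern: reserve space for a fixed header/body struct, fill it in, commit. A toy version of that reserve/fill/commit discipline, with a bump allocator standing in for the device FIFO (the command id below is a placeholder, not a real SVGA value):

#include <stdint.h>
#include <stdio.h>

static uint8_t fifo[4096];
static size_t fifo_head;

static void *fifo_reserve(size_t bytes)
{
	if (fifo_head + bytes > sizeof(fifo))
		return NULL;            /* out of command space */
	return &fifo[fifo_head];
}

static void fifo_commit(size_t bytes)
{
	fifo_head += bytes;             /* make the command visible */
}

struct cmd_header { uint32_t id, size; };
struct cmd_readback { struct cmd_header header; uint32_t cid; };

static int readback_all(uint32_t ctx_id)
{
	struct cmd_readback *cmd = fifo_reserve(sizeof(*cmd));

	if (cmd == NULL)
		return -1;              /* mirrors the -ENOMEM path */

	cmd->header.id = 0x1234;        /* placeholder command id */
	cmd->header.size = sizeof(*cmd) - sizeof(cmd->header);
	cmd->cid = ctx_id;
	fifo_commit(sizeof(*cmd));
	return 0;
}

int main(void)
{
	printf("readback: %d, used: %zu bytes\n", readback_all(7), fifo_head);
	return 0;
}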
1551
1552
1553
1554/**
1555 * vmw_query_move_notify - Read back cached query states
1556 *
1557 * @bo: The TTM buffer object about to move.
1558 * @mem: The memory region @bo is moving to.
1559 *
1560 * Called before the query MOB is swapped out to read back cached query
1561 * states from the device.
1562 */
1563void vmw_query_move_notify(struct ttm_buffer_object *bo,
1564 struct ttm_mem_reg *mem)
1565{
1566 struct vmw_dma_buffer *dx_query_mob;
1567 struct ttm_bo_device *bdev = bo->bdev;
1568 struct vmw_private *dev_priv;
1569
1570
1571 dev_priv = container_of(bdev, struct vmw_private, bdev);
1572
1573 mutex_lock(&dev_priv->binding_mutex);
1574
1575 dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
1576 if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
1577 mutex_unlock(&dev_priv->binding_mutex);
1578 return;
1579 }
1580
1581 /* If BO is being moved from MOB to system memory */
1582 if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
1583 struct vmw_fence_obj *fence;
1584
1585 (void) vmw_query_readback_all(dx_query_mob);
1586 mutex_unlock(&dev_priv->binding_mutex);
1587
1588 /* Create a fence and attach the BO to it */
1589 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1590 vmw_fence_single_bo(bo, fence);
1591
1592 if (fence != NULL)
1593 vmw_fence_obj_unreference(&fence);
1594
1595 (void) ttm_bo_wait(bo, false, false, false);
1596 } else
1597 mutex_unlock(&dev_priv->binding_mutex);
1598
1599}
1600
1498/** 1601/**
1499 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer. 1602 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
1500 * 1603 *
@@ -1573,3 +1676,107 @@ void vmw_resource_evict_all(struct vmw_private *dev_priv)
1573 1676
1574 mutex_unlock(&dev_priv->cmdbuf_mutex); 1677 mutex_unlock(&dev_priv->cmdbuf_mutex);
1575} 1678}
1679
1680/**
1681 * vmw_resource_pin - Add a pin reference on a resource
1682 *
1683 * @res: The resource to add a pin reference on
1684 *
1685 * This function adds a pin reference, and if needed validates the resource.
1686 * Having a pin reference means that the resource can never be evicted, and
1687 * its id will never change as long as there is a pin reference.
1688 * This function returns 0 on success and a negative error code on failure.
1689 */
1690int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
1691{
1692 struct vmw_private *dev_priv = res->dev_priv;
1693 int ret;
1694
1695 ttm_write_lock(&dev_priv->reservation_sem, interruptible);
1696 mutex_lock(&dev_priv->cmdbuf_mutex);
1697 ret = vmw_resource_reserve(res, interruptible, false);
1698 if (ret)
1699 goto out_no_reserve;
1700
1701 if (res->pin_count == 0) {
1702 struct vmw_dma_buffer *vbo = NULL;
1703
1704 if (res->backup) {
1705 vbo = res->backup;
1706
1707 ttm_bo_reserve(&vbo->base, interruptible, false, false,
1708 NULL);
1709 if (!vbo->pin_count) {
1710 ret = ttm_bo_validate
1711 (&vbo->base,
1712 res->func->backup_placement,
1713 interruptible, false);
1714 if (ret) {
1715 ttm_bo_unreserve(&vbo->base);
1716 goto out_no_validate;
1717 }
1718 }
1719
1720 /* Do we really need to pin the MOB as well? */
1721 vmw_bo_pin_reserved(vbo, true);
1722 }
1723 ret = vmw_resource_validate(res);
1724 if (vbo)
1725 ttm_bo_unreserve(&vbo->base);
1726 if (ret)
1727 goto out_no_validate;
1728 }
1729 res->pin_count++;
1730
1731out_no_validate:
1732 vmw_resource_unreserve(res, false, NULL, 0UL);
1733out_no_reserve:
1734 mutex_unlock(&dev_priv->cmdbuf_mutex);
1735 ttm_write_unlock(&dev_priv->reservation_sem);
1736
1737 return ret;
1738}
1739
1740/**
1741 * vmw_resource_unpin - Remove a pin reference from a resource
1742 *
1743 * @res: The resource to remove a pin reference from
1744 *
1745 * Having a pin reference means that the resource can never be evicted, and
1746 * its id will never change as long as there is a pin reference.
1747 */
1748void vmw_resource_unpin(struct vmw_resource *res)
1749{
1750 struct vmw_private *dev_priv = res->dev_priv;
1751 int ret;
1752
1753 ttm_read_lock(&dev_priv->reservation_sem, false);
1754 mutex_lock(&dev_priv->cmdbuf_mutex);
1755
1756 ret = vmw_resource_reserve(res, false, true);
1757 WARN_ON(ret);
1758
1759 WARN_ON(res->pin_count == 0);
1760 if (--res->pin_count == 0 && res->backup) {
1761 struct vmw_dma_buffer *vbo = res->backup;
1762
1763 ttm_bo_reserve(&vbo->base, false, false, false, NULL);
1764 vmw_bo_pin_reserved(vbo, false);
1765 ttm_bo_unreserve(&vbo->base);
1766 }
1767
1768 vmw_resource_unreserve(res, false, NULL, 0UL);
1769
1770 mutex_unlock(&dev_priv->cmdbuf_mutex);
1771 ttm_read_unlock(&dev_priv->reservation_sem);
1772}
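vmw_resource_pin()/vmw_resource_unpin() keep a pin count so that only the first pin validates and places the resource and only the last unpin lets its backup move again. The counting logic, reduced to a sketch with the locking and TTM calls elided:

#include <stdio.h>

struct resource {
	unsigned pin_count;
	int validated;
};

static int resource_pin(struct resource *res)
{
	if (res->pin_count == 0)
		res->validated = 1;     /* first pin: validate/place backup */
	res->pin_count++;
	return 0;
}

static void resource_unpin(struct resource *res)
{
	if (res->pin_count == 0) {
		fprintf(stderr, "unbalanced unpin\n"); /* WARN_ON analogue */
		return;
	}
	if (--res->pin_count == 0)
		res->validated = 0;     /* last unpin: backup may move again */
}

int main(void)
{
	struct resource res = { 0, 0 };

	resource_pin(&res);
	resource_pin(&res);
	resource_unpin(&res);
	printf("pins: %u, pinned in place: %d\n",
	       res.pin_count, res.validated);
	resource_unpin(&res);
	return 0;
}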
1773
1774/**
1775 * vmw_res_type - Return the resource type
1776 *
1777 * @res: Pointer to the resource
1778 */
1779enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1780{
1781 return res->func->res_type;
1782}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index f3adeed2854c..5994ef6265e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2012-2014 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,12 @@
30 30
31#include "vmwgfx_drv.h" 31#include "vmwgfx_drv.h"
32 32
33enum vmw_cmdbuf_res_state {
34 VMW_CMDBUF_RES_COMMITTED,
35 VMW_CMDBUF_RES_ADD,
36 VMW_CMDBUF_RES_DEL
37};
38
33/** 39/**
34 * struct vmw_user_resource_conv - Identify a derived user-exported resource 40 * struct vmw_user_resource_conv - Identify a derived user-exported resource
35 * type and provide a function to convert its ttm_base_object pointer to 41 * type and provide a function to convert its ttm_base_object pointer to
@@ -55,8 +61,10 @@ struct vmw_user_resource_conv {
55 * @bind: Bind a hardware resource to persistent buffer storage. 61 * @bind: Bind a hardware resource to persistent buffer storage.
56 * @unbind: Unbind a hardware resource from persistent 62 * @unbind: Unbind a hardware resource from persistent
57 * buffer storage. 63 * buffer storage.
64 * @commit_notify: If the resource is a command buffer managed resource,
65 * callback to notify that a define or remove command
66 * has been committed to the device.
58 */ 67 */
59
60struct vmw_res_func { 68struct vmw_res_func {
61 enum vmw_res_type res_type; 69 enum vmw_res_type res_type;
62 bool needs_backup; 70 bool needs_backup;
@@ -71,6 +79,8 @@ struct vmw_res_func {
71 int (*unbind) (struct vmw_resource *res, 79 int (*unbind) (struct vmw_resource *res,
72 bool readback, 80 bool readback,
73 struct ttm_validate_buffer *val_buf); 81 struct ttm_validate_buffer *val_buf);
82 void (*commit_notify)(struct vmw_resource *res,
83 enum vmw_cmdbuf_res_state state);
74}; 84};
75 85
76int vmw_resource_alloc_id(struct vmw_resource *res); 86int vmw_resource_alloc_id(struct vmw_resource *res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 7dc591d04d9a..b96d1ab610c5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,10 +36,55 @@
36#define vmw_connector_to_sou(x) \ 36#define vmw_connector_to_sou(x) \
37 container_of(x, struct vmw_screen_object_unit, base.connector) 37 container_of(x, struct vmw_screen_object_unit, base.connector)
38 38
39/**
40 * struct vmw_kms_sou_surface_dirty - Closure structure for
41 * blit surface to screen command.
42 * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
43 * @left: Left side of bounding box.
44 * @right: Right side of bounding box.
45 * @top: Top side of bounding box.
46 * @bottom: Bottom side of bounding box.
47 * @dst_x: Difference between source clip rects and framebuffer coordinates.
48 * @dst_y: Difference between source clip rects and framebuffer coordinates.
49 * @sid: Surface id of surface to copy from.
50 */
51struct vmw_kms_sou_surface_dirty {
52 struct vmw_kms_dirty base;
53 s32 left, right, top, bottom;
54 s32 dst_x, dst_y;
55 u32 sid;
56};
57
58/*
59 * SVGA commands that are used by this code. Please see the device headers
60 * for explanation.
61 */
62struct vmw_kms_sou_readback_blit {
63 uint32 header;
64 SVGAFifoCmdBlitScreenToGMRFB body;
65};
66
67struct vmw_kms_sou_dmabuf_blit {
68 uint32 header;
69 SVGAFifoCmdBlitGMRFBToScreen body;
70};
71
72struct vmw_kms_sou_dirty_cmd {
73 SVGA3dCmdHeader header;
74 SVGA3dCmdBlitSurfaceToScreen body;
75};
76
77
78/*
79 * Other structs.
80 */
81
39struct vmw_screen_object_display { 82struct vmw_screen_object_display {
40 unsigned num_implicit; 83 unsigned num_implicit;
41 84
42 struct vmw_framebuffer *implicit_fb; 85 struct vmw_framebuffer *implicit_fb;
86 SVGAFifoCmdDefineGMRFB cur;
87 struct vmw_dma_buffer *pinned_gmrfb;
43}; 88};
44 89
45/** 90/**
@@ -57,7 +102,7 @@ struct vmw_screen_object_unit {
57 102
58static void vmw_sou_destroy(struct vmw_screen_object_unit *sou) 103static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
59{ 104{
60 vmw_display_unit_cleanup(&sou->base); 105 vmw_du_cleanup(&sou->base);
61 kfree(sou); 106 kfree(sou);
62} 107}
63 108
@@ -72,7 +117,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
72} 117}
73 118
74static void vmw_sou_del_active(struct vmw_private *vmw_priv, 119static void vmw_sou_del_active(struct vmw_private *vmw_priv,
75 struct vmw_screen_object_unit *sou) 120 struct vmw_screen_object_unit *sou)
76{ 121{
77 struct vmw_screen_object_display *ld = vmw_priv->sou_priv; 122 struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
78 123
@@ -84,8 +129,8 @@ static void vmw_sou_del_active(struct vmw_private *vmw_priv,
84} 129}
85 130
86static void vmw_sou_add_active(struct vmw_private *vmw_priv, 131static void vmw_sou_add_active(struct vmw_private *vmw_priv,
87 struct vmw_screen_object_unit *sou, 132 struct vmw_screen_object_unit *sou,
88 struct vmw_framebuffer *vfb) 133 struct vmw_framebuffer *vfb)
89{ 134{
90 struct vmw_screen_object_display *ld = vmw_priv->sou_priv; 135 struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
91 136
@@ -202,14 +247,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
202static void vmw_sou_backing_free(struct vmw_private *dev_priv, 247static void vmw_sou_backing_free(struct vmw_private *dev_priv,
203 struct vmw_screen_object_unit *sou) 248 struct vmw_screen_object_unit *sou)
204{ 249{
205 struct ttm_buffer_object *bo; 250 vmw_dmabuf_unreference(&sou->buffer);
206
207 if (unlikely(sou->buffer == NULL))
208 return;
209
210 bo = &sou->buffer->base;
211 ttm_bo_unref(&bo);
212 sou->buffer = NULL;
213 sou->buffer_size = 0; 251 sou->buffer_size = 0;
214} 252}
215 253
@@ -274,13 +312,13 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
274 dev_priv = vmw_priv(crtc->dev); 312 dev_priv = vmw_priv(crtc->dev);
275 313
276 if (set->num_connectors > 1) { 314 if (set->num_connectors > 1) {
277 DRM_ERROR("to many connectors\n"); 315 DRM_ERROR("Too many connectors\n");
278 return -EINVAL; 316 return -EINVAL;
279 } 317 }
280 318
281 if (set->num_connectors == 1 && 319 if (set->num_connectors == 1 &&
282 set->connectors[0] != &sou->base.connector) { 320 set->connectors[0] != &sou->base.connector) {
283 DRM_ERROR("connector doesn't match %p %p\n", 321 DRM_ERROR("Connector doesn't match %p %p\n",
284 set->connectors[0], &sou->base.connector); 322 set->connectors[0], &sou->base.connector);
285 return -EINVAL; 323 return -EINVAL;
286 } 324 }
@@ -331,7 +369,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
331 return -EINVAL; 369 return -EINVAL;
332 } 370 }
333 371
334 vmw_fb_off(dev_priv); 372 vmw_svga_enable(dev_priv);
335 373
336 if (mode->hdisplay != crtc->mode.hdisplay || 374 if (mode->hdisplay != crtc->mode.hdisplay ||
337 mode->vdisplay != crtc->mode.vdisplay) { 375 mode->vdisplay != crtc->mode.vdisplay) {
@@ -390,6 +428,108 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
390 return 0; 428 return 0;
391} 429}
392 430
431/**
432 * Returns if this unit can be page flipped.
433 * Must be called with the mode_config mutex held.
434 */
435static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv,
436 struct drm_crtc *crtc)
437{
438 struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
439
440 if (!sou->base.is_implicit)
441 return true;
442
443 if (dev_priv->sou_priv->num_implicit != 1)
444 return false;
445
446 return true;
447}
448
449/**
450 * Update the implicit fb to the current fb of this crtc.
451 * Must be called with the mode_config mutex held.
452 */
453static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
454 struct drm_crtc *crtc)
455{
456 struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
457
458 BUG_ON(!sou->base.is_implicit);
459
460 dev_priv->sou_priv->implicit_fb =
461 vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
462}
463
464static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
465 struct drm_framebuffer *fb,
466 struct drm_pending_vblank_event *event,
467 uint32_t flags)
468{
469 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
470 struct drm_framebuffer *old_fb = crtc->primary->fb;
471 struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
472 struct vmw_fence_obj *fence = NULL;
473 struct drm_clip_rect clips;
474 int ret;
475
476 /* require ScreenObject support for page flipping */
477 if (!dev_priv->sou_priv)
478 return -ENOSYS;
479
480 if (!vmw_sou_screen_object_flippable(dev_priv, crtc))
481 return -EINVAL;
482
483 crtc->primary->fb = fb;
484
485 /* do a full screen dirty update */
486 clips.x1 = clips.y1 = 0;
487 clips.x2 = fb->width;
488 clips.y2 = fb->height;
489
490 if (vfb->dmabuf)
491 ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
492 &clips, 1, 1,
493 true, &fence);
494 else
495 ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
496 &clips, NULL, NULL,
497 0, 0, 1, 1, &fence);
498
499
500 if (ret != 0)
501 goto out_no_fence;
502 if (!fence) {
503 ret = -EINVAL;
504 goto out_no_fence;
505 }
506
507 if (event) {
508 struct drm_file *file_priv = event->base.file_priv;
509
510 ret = vmw_event_fence_action_queue(file_priv, fence,
511 &event->base,
512 &event->event.tv_sec,
513 &event->event.tv_usec,
514 true);
515 }
516
517 /*
518 * No need to hold on to this now. The only cleanup
519 * we need to do if we fail is unref the fence.
520 */
521 vmw_fence_obj_unreference(&fence);
522
523 if (vmw_crtc_to_du(crtc)->is_implicit)
524 vmw_sou_update_implicit_fb(dev_priv, crtc);
525
526 return ret;
527
528out_no_fence:
529 crtc->primary->fb = old_fb;
530 return ret;
531}
532
393static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { 533static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
394 .save = vmw_du_crtc_save, 534 .save = vmw_du_crtc_save,
395 .restore = vmw_du_crtc_restore, 535 .restore = vmw_du_crtc_restore,
@@ -398,7 +538,7 @@ static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
398 .gamma_set = vmw_du_crtc_gamma_set, 538 .gamma_set = vmw_du_crtc_gamma_set,
399 .destroy = vmw_sou_crtc_destroy, 539 .destroy = vmw_sou_crtc_destroy,
400 .set_config = vmw_sou_crtc_set_config, 540 .set_config = vmw_sou_crtc_set_config,
401 .page_flip = vmw_du_page_flip, 541 .page_flip = vmw_sou_crtc_page_flip,
402}; 542};
403 543
404/* 544/*
@@ -423,7 +563,7 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
423 vmw_sou_destroy(vmw_connector_to_sou(connector)); 563 vmw_sou_destroy(vmw_connector_to_sou(connector));
424} 564}
425 565
426static struct drm_connector_funcs vmw_legacy_connector_funcs = { 566static struct drm_connector_funcs vmw_sou_connector_funcs = {
427 .dpms = vmw_du_connector_dpms, 567 .dpms = vmw_du_connector_dpms,
428 .save = vmw_du_connector_save, 568 .save = vmw_du_connector_save,
429 .restore = vmw_du_connector_restore, 569 .restore = vmw_du_connector_restore,
@@ -458,7 +598,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
458 sou->base.pref_mode = NULL; 598 sou->base.pref_mode = NULL;
459 sou->base.is_implicit = true; 599 sou->base.is_implicit = true;
460 600
461 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, 601 drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
462 DRM_MODE_CONNECTOR_VIRTUAL); 602 DRM_MODE_CONNECTOR_VIRTUAL);
463 connector->status = vmw_du_connector_detect(connector, true); 603 connector->status = vmw_du_connector_detect(connector, true);
464 604
@@ -481,7 +621,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
481 return 0; 621 return 0;
482} 622}
483 623
484int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv) 624int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
485{ 625{
486 struct drm_device *dev = dev_priv->dev; 626 struct drm_device *dev = dev_priv->dev;
487 int i, ret; 627 int i, ret;
@@ -516,7 +656,9 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
516 for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) 656 for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
517 vmw_sou_init(dev_priv, i); 657 vmw_sou_init(dev_priv, i);
518 658
519 DRM_INFO("Screen objects system initialized\n"); 659 dev_priv->active_display_unit = vmw_du_screen_object;
660
661 DRM_INFO("Screen Objects Display Unit initialized\n");
520 662
521 return 0; 663 return 0;
522 664
@@ -529,7 +671,7 @@ err_no_mem:
529 return ret; 671 return ret;
530} 672}
531 673
532int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv) 674int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
533{ 675{
534 struct drm_device *dev = dev_priv->dev; 676 struct drm_device *dev = dev_priv->dev;
535 677
@@ -543,35 +685,369 @@ int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
543 return 0; 685 return 0;
544} 686}
545 687
688static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
689 struct vmw_framebuffer *framebuffer)
690{
691 struct vmw_dma_buffer *buf =
692 container_of(framebuffer, struct vmw_framebuffer_dmabuf,
693 base)->buffer;
694 int depth = framebuffer->base.depth;
695 struct {
696 uint32_t header;
697 SVGAFifoCmdDefineGMRFB body;
698 } *cmd;
699
700 /* Emulate RGBA support: contrary to svga_reg.h, it is not
701 * supported by hosts. This is only a problem if we later read
702 * this value back and expect what we uploaded.
703 */
704 if (depth == 32)
705 depth = 24;
706
707 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
708 if (!cmd) {
709 DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
710 return -ENOMEM;
711 }
712
713 cmd->header = SVGA_CMD_DEFINE_GMRFB;
714 cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
715 cmd->body.format.colorDepth = depth;
716 cmd->body.format.reserved = 0;
717 cmd->body.bytesPerLine = framebuffer->base.pitches[0];
718 /* Buffer is reserved in vram or GMR */
719 vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
720 vmw_fifo_commit(dev_priv, sizeof(*cmd));
721
722 return 0;
723}
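The depth clamp above means a 32-bit-deep framebuffer is always advertised to the host as depth 24 before the GMRFB is defined. Reduced to its essentials (illustrative only):

#include <stdio.h>

struct gmrfb_format { int bits_per_pixel, color_depth; };

static struct gmrfb_format fixup(int bpp, int depth)
{
	struct gmrfb_format f = { bpp, depth };

	/* Hosts have no real RGBA support: advertise 32bpp data as
	 * depth 24 so readbacks match what was uploaded. */
	if (f.color_depth == 32)
		f.color_depth = 24;
	return f;
}

int main(void)
{
	struct gmrfb_format f = fixup(32, 32);

	printf("bpp=%d depth=%d\n", f.bits_per_pixel, f.color_depth);
	return 0;
}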
724
546/** 725/**
547 * Returns if this unit can be page flipped. 726 * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
548 * Must be called with the mode_config mutex held. 727 * blit surface to screen command.
728 *
729 * @dirty: The closure structure.
730 *
731 * Fills in the missing fields in the command, and translates the cliprects
732 * to match the destination bounding box encoded.
549 */ 733 */
550bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv, 734static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
551 struct drm_crtc *crtc)
552{ 735{
553 struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc); 736 struct vmw_kms_sou_surface_dirty *sdirty =
737 container_of(dirty, typeof(*sdirty), base);
738 struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
739 s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
740 s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
741 size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
742 SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
743 int i;
744
745 cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
746 cmd->header.size = sizeof(cmd->body) + region_size;
747
748 /*
749 * Use the destination bounding box to specify the destination and
750 * source bounding regions.
751 */
752 cmd->body.destRect.left = sdirty->left;
753 cmd->body.destRect.right = sdirty->right;
754 cmd->body.destRect.top = sdirty->top;
755 cmd->body.destRect.bottom = sdirty->bottom;
756
757 cmd->body.srcRect.left = sdirty->left + trans_x;
758 cmd->body.srcRect.right = sdirty->right + trans_x;
759 cmd->body.srcRect.top = sdirty->top + trans_y;
760 cmd->body.srcRect.bottom = sdirty->bottom + trans_y;
761
762 cmd->body.srcImage.sid = sdirty->sid;
763 cmd->body.destScreenId = dirty->unit->unit;
764
765 /* Blits are relative to the destination rect. Translate. */
766 for (i = 0; i < dirty->num_hits; ++i, ++blit) {
767 blit->left -= sdirty->left;
768 blit->right -= sdirty->left;
769 blit->top -= sdirty->top;
770 blit->bottom -= sdirty->top;
771 }
554 772
555 if (!sou->base.is_implicit) 773 vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));
556 return true;
557 774
558 if (dev_priv->sou_priv->num_implicit != 1) 775 sdirty->left = sdirty->top = S32_MAX;
559 return false; 776 sdirty->right = sdirty->bottom = S32_MIN;
777}
560 778
561 return true; 779/**
780 * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
781 *
782 * @dirty: The closure structure
783 *
784 * Encodes a SVGASignedRect cliprect and updates the bounding box of the
785 * BLIT_SURFACE_TO_SCREEN command.
786 */
787static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
788{
789 struct vmw_kms_sou_surface_dirty *sdirty =
790 container_of(dirty, typeof(*sdirty), base);
791 struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
792 SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
793
794 /* Destination rect. */
795 blit += dirty->num_hits;
796 blit->left = dirty->unit_x1;
797 blit->top = dirty->unit_y1;
798 blit->right = dirty->unit_x2;
799 blit->bottom = dirty->unit_y2;
800
801 /* Destination bounding box */
802 sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
803 sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
804 sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
805 sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
806
807 dirty->num_hits++;
562} 808}
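The clip callback grows a destination bounding box seeded with S32_MAX/S32_MIN extremes, so the first rectangle always replaces them, and the commit callback re-seeds them for the next batch. The same accumulation in a standalone sketch (INT_MAX/INT_MIN standing in for S32_MAX/S32_MIN):

#include <limits.h>
#include <stdio.h>

struct bbox { int left, top, right, bottom; };

static void bbox_reset(struct bbox *b)
{
	b->left = b->top = INT_MAX;     /* any rect will shrink these */
	b->right = b->bottom = INT_MIN; /* any rect will grow these */
}

static void bbox_add(struct bbox *b, int x1, int y1, int x2, int y2)
{
	if (x1 < b->left)   b->left = x1;
	if (y1 < b->top)    b->top = y1;
	if (x2 > b->right)  b->right = x2;
	if (y2 > b->bottom) b->bottom = y2;
}

int main(void)
{
	struct bbox b;

	bbox_reset(&b);
	bbox_add(&b, 10, 10, 100, 80);
	bbox_add(&b, 50, 5, 200, 60);
	printf("bbox: %d,%d..%d,%d\n", b.left, b.top, b.right, b.bottom);
	bbox_reset(&b);                 /* ready for the next commit */
	return 0;
}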
563 809
564/** 810/**
565 * Update the implicit fb to the current fb of this crtc. 811 * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
566 * Must be called with the mode_config mutex held. 812 *
813 * @dev_priv: Pointer to the device private structure.
814 * @framebuffer: Pointer to the surface-buffer backed framebuffer.
815 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
816 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
817 * be NULL.
818 * @srf: Pointer to surface to blit from. If NULL, the surface attached
819 * to @framebuffer will be used.
820 * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
821 * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
822 * @num_clips: Number of clip rects in @clips.
823 * @inc: Increment to use when looping over @clips.
824 * @out_fence: If non-NULL, will return a ref-counted pointer to a
825 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
826 * case the device has already synchronized.
827 *
828 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
829 * interrupted.
567 */ 830 */
568void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv, 831int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
569 struct drm_crtc *crtc) 832 struct vmw_framebuffer *framebuffer,
833 struct drm_clip_rect *clips,
834 struct drm_vmw_rect *vclips,
835 struct vmw_resource *srf,
836 s32 dest_x,
837 s32 dest_y,
838 unsigned num_clips, int inc,
839 struct vmw_fence_obj **out_fence)
570{ 840{
571 struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc); 841 struct vmw_framebuffer_surface *vfbs =
842 container_of(framebuffer, typeof(*vfbs), base);
843 struct vmw_kms_sou_surface_dirty sdirty;
844 int ret;
572 845
573 BUG_ON(!sou->base.is_implicit); 846 if (!srf)
847 srf = &vfbs->surface->res;
574 848
575 dev_priv->sou_priv->implicit_fb = 849 ret = vmw_kms_helper_resource_prepare(srf, true);
576 vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb); 850 if (ret)
851 return ret;
852
853 sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
854 sdirty.base.clip = vmw_sou_surface_clip;
855 sdirty.base.dev_priv = dev_priv;
856 sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
857 sizeof(SVGASignedRect) * num_clips;
858
859 sdirty.sid = srf->id;
860 sdirty.left = sdirty.top = S32_MAX;
861 sdirty.right = sdirty.bottom = S32_MIN;
862 sdirty.dst_x = dest_x;
863 sdirty.dst_y = dest_y;
864
865 ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
866 dest_x, dest_y, num_clips, inc,
867 &sdirty.base);
868 vmw_kms_helper_resource_finish(srf, out_fence);
869
870 return ret;
871}
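Both the surface and dma-buf paths are built on the new vmw_kms_dirty closure: a base struct carries a per-clip callback, a commit callback and a FIFO reserve size, vmw_kms_helper_dirty() drives them, and derived structs such as vmw_kms_sou_surface_dirty add their own state. A stripped-down sketch of that pattern (simplified signatures, not the driver's actual API):

#include <stdio.h>

struct dirty {
	void (*clip)(struct dirty *d, int x1, int y1, int x2, int y2);
	void (*commit)(struct dirty *d);
	int num_hits;
};

/* A derived closure adds its own state, like vmw_kms_sou_surface_dirty. */
struct surface_dirty {
	struct dirty base;
	int sid;
};

static void surface_clip(struct dirty *d, int x1, int y1, int x2, int y2)
{
	d->num_hits++;                  /* encode one cliprect */
}

static void surface_commit(struct dirty *d)
{
	struct surface_dirty *s = (struct surface_dirty *)d;

	printf("commit %d clips for surface %d\n", d->num_hits, s->sid);
	d->num_hits = 0;
}

/* Stands in for vmw_kms_helper_dirty(): walk the clips, then commit. */
static void helper_dirty(struct dirty *d, const int (*clips)[4], int n)
{
	int i;

	for (i = 0; i < n; i++)
		d->clip(d, clips[i][0], clips[i][1], clips[i][2], clips[i][3]);
	d->commit(d);
}

int main(void)
{
	struct surface_dirty s = { { surface_clip, surface_commit, 0 }, 42 };
	const int clips[][4] = { { 0, 0, 10, 10 }, { 5, 5, 20, 20 } };

	helper_dirty(&s.base, clips, 2);
	return 0;
}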
872
873/**
874 * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips.
875 *
876 * @dirty: The closure structure.
877 *
878 * Commits a previously built command buffer of readback clips.
879 */
880static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
881{
882 vmw_fifo_commit(dirty->dev_priv,
883 sizeof(struct vmw_kms_sou_dmabuf_blit) *
884 dirty->num_hits);
885}
886
887/**
888 * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect.
889 *
890 * @dirty: The closure structure
891 *
892 * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
893 */
894static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
895{
896 struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
897
898 blit += dirty->num_hits;
899 blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
900 blit->body.destScreenId = dirty->unit->unit;
901 blit->body.srcOrigin.x = dirty->fb_x;
902 blit->body.srcOrigin.y = dirty->fb_y;
903 blit->body.destRect.left = dirty->unit_x1;
904 blit->body.destRect.top = dirty->unit_y1;
905 blit->body.destRect.right = dirty->unit_x2;
906 blit->body.destRect.bottom = dirty->unit_y2;
907 dirty->num_hits++;
908}
909
910/**
911 * vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
912 *
913 * @dev_priv: Pointer to the device private structure.
914 * @framebuffer: Pointer to the dma-buffer backed framebuffer.
915 * @clips: Array of clip rects.
916 * @num_clips: Number of clip rects in @clips.
917 * @increment: Increment to use when looping over @clips.
918 * @interruptible: Whether to perform waits interruptible if possible.
919 * @out_fence: If non-NULL, will return a ref-counted pointer to a
920 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
921 * case the device has already synchronized.
922 *
923 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
924 * interrupted.
925 */
926int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
927 struct vmw_framebuffer *framebuffer,
928 struct drm_clip_rect *clips,
929 unsigned num_clips, int increment,
930 bool interruptible,
931 struct vmw_fence_obj **out_fence)
932{
933 struct vmw_dma_buffer *buf =
934 container_of(framebuffer, struct vmw_framebuffer_dmabuf,
935 base)->buffer;
936 struct vmw_kms_dirty dirty;
937 int ret;
938
939 ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
940 false);
941 if (ret)
942 return ret;
943
944 ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
945 if (unlikely(ret != 0))
946 goto out_revert;
947
948 dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
949 dirty.clip = vmw_sou_dmabuf_clip;
950 dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
951 num_clips;
952 ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
953 0, 0, num_clips, increment, &dirty);
954 vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
955
956 return ret;
957
958out_revert:
959 vmw_kms_helper_buffer_revert(buf);
960
961 return ret;
962}
963
964
965/**
966 * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
967 *
968 * @dirty: The closure structure.
969 *
970 * Commits a previously built command buffer of readback clips.
971 */
972static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
973{
974 vmw_fifo_commit(dirty->dev_priv,
975 sizeof(struct vmw_kms_sou_readback_blit) *
976 dirty->num_hits);
977}
978
979/**
980 * vmw_sou_readback_clip - Callback to encode a readback cliprect.
981 *
982 * @dirty: The closure structure
983 *
984 * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
985 */
986static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
987{
988 struct vmw_kms_sou_readback_blit *blit = dirty->cmd;
989
990 blit += dirty->num_hits;
991 blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
992 blit->body.srcScreenId = dirty->unit->unit;
993 blit->body.destOrigin.x = dirty->fb_x;
994 blit->body.destOrigin.y = dirty->fb_y;
995 blit->body.srcRect.left = dirty->unit_x1;
996 blit->body.srcRect.top = dirty->unit_y1;
997 blit->body.srcRect.right = dirty->unit_x2;
998 blit->body.srcRect.bottom = dirty->unit_y2;
999 dirty->num_hits++;
1000}
1001
1002/**
1003 * vmw_kms_sou_readback - Perform a readback from the screen object system to
1004 * a dma-buffer backed framebuffer.
1005 *
1006 * @dev_priv: Pointer to the device private structure.
1007 * @file_priv: Pointer to a struct drm_file identifying the caller.
1008 * Must be set to NULL if @user_fence_rep is NULL.
1009 * @vfb: Pointer to the dma-buffer backed framebuffer.
1010 * @user_fence_rep: User-space provided structure for fence information.
1011 * Must be set to non-NULL if @file_priv is non-NULL.
1012 * @vclips: Array of clip rects.
1013 * @num_clips: Number of clip rects in @vclips.
1014 *
1015 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1016 * interrupted.
1017 */
1018int vmw_kms_sou_readback(struct vmw_private *dev_priv,
1019 struct drm_file *file_priv,
1020 struct vmw_framebuffer *vfb,
1021 struct drm_vmw_fence_rep __user *user_fence_rep,
1022 struct drm_vmw_rect *vclips,
1023 uint32_t num_clips)
1024{
1025 struct vmw_dma_buffer *buf =
1026 container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
1027 struct vmw_kms_dirty dirty;
1028 int ret;
1029
1030 ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
1031 if (ret)
1032 return ret;
1033
1034 ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
1035 if (unlikely(ret != 0))
1036 goto out_revert;
1037
1038 dirty.fifo_commit = vmw_sou_readback_fifo_commit;
1039 dirty.clip = vmw_sou_readback_clip;
1040 dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
1041 num_clips;
1042 ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
1043 0, 0, num_clips, 1, &dirty);
1044 vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
1045 user_fence_rep);
1046
1047 return ret;
1048
1049out_revert:
1050 vmw_kms_helper_buffer_revert(buf);
1051
1052 return ret;
577} 1053}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 6a4584a43aa6..bba1ee395478 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,12 +27,15 @@
27 27
28#include "vmwgfx_drv.h" 28#include "vmwgfx_drv.h"
29#include "vmwgfx_resource_priv.h" 29#include "vmwgfx_resource_priv.h"
30#include "vmwgfx_binding.h"
30#include "ttm/ttm_placement.h" 31#include "ttm/ttm_placement.h"
31 32
32struct vmw_shader { 33struct vmw_shader {
33 struct vmw_resource res; 34 struct vmw_resource res;
34 SVGA3dShaderType type; 35 SVGA3dShaderType type;
35 uint32_t size; 36 uint32_t size;
37 uint8_t num_input_sig;
38 uint8_t num_output_sig;
36}; 39};
37 40
38struct vmw_user_shader { 41struct vmw_user_shader {
@@ -40,8 +43,18 @@ struct vmw_user_shader {
40 struct vmw_shader shader; 43 struct vmw_shader shader;
41}; 44};
42 45
46struct vmw_dx_shader {
47 struct vmw_resource res;
48 struct vmw_resource *ctx;
49 struct vmw_resource *cotable;
50 u32 id;
51 bool committed;
52 struct list_head cotable_head;
53};
54
43static uint64_t vmw_user_shader_size; 55static uint64_t vmw_user_shader_size;
44static uint64_t vmw_shader_size; 56static uint64_t vmw_shader_size;
57static size_t vmw_shader_dx_size;
45 58
46static void vmw_user_shader_free(struct vmw_resource *res); 59static void vmw_user_shader_free(struct vmw_resource *res);
47static struct vmw_resource * 60static struct vmw_resource *
@@ -55,6 +68,18 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
55 struct ttm_validate_buffer *val_buf); 68 struct ttm_validate_buffer *val_buf);
56static int vmw_gb_shader_destroy(struct vmw_resource *res); 69static int vmw_gb_shader_destroy(struct vmw_resource *res);
57 70
71static int vmw_dx_shader_create(struct vmw_resource *res);
72static int vmw_dx_shader_bind(struct vmw_resource *res,
73 struct ttm_validate_buffer *val_buf);
74static int vmw_dx_shader_unbind(struct vmw_resource *res,
75 bool readback,
76 struct ttm_validate_buffer *val_buf);
77static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
78 enum vmw_cmdbuf_res_state state);
79static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
80static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
81static uint64_t vmw_user_shader_size;
82
58static const struct vmw_user_resource_conv user_shader_conv = { 83static const struct vmw_user_resource_conv user_shader_conv = {
59 .object_type = VMW_RES_SHADER, 84 .object_type = VMW_RES_SHADER,
60 .base_obj_to_res = vmw_user_shader_base_to_res, 85 .base_obj_to_res = vmw_user_shader_base_to_res,
@@ -77,6 +102,24 @@ static const struct vmw_res_func vmw_gb_shader_func = {
77 .unbind = vmw_gb_shader_unbind 102 .unbind = vmw_gb_shader_unbind
78}; 103};
79 104
105static const struct vmw_res_func vmw_dx_shader_func = {
106 .res_type = vmw_res_shader,
107 .needs_backup = true,
108 .may_evict = false,
109 .type_name = "dx shaders",
110 .backup_placement = &vmw_mob_placement,
111 .create = vmw_dx_shader_create,
112 /*
113 * The destroy callback is only called with a committed resource on
114 * context destroy, in which case we destroy the cotable anyway,
115 * so there's no need to destroy DX shaders separately.
116 */
117 .destroy = NULL,
118 .bind = vmw_dx_shader_bind,
119 .unbind = vmw_dx_shader_unbind,
120 .commit_notify = vmw_dx_shader_commit_notify,
121};
122
80/** 123/**
81 * Shader management: 124 * Shader management:
82 */ 125 */
@@ -87,25 +130,42 @@ vmw_res_to_shader(struct vmw_resource *res)
87 return container_of(res, struct vmw_shader, res); 130 return container_of(res, struct vmw_shader, res);
88} 131}
89 132
133/**
134 * vmw_res_to_dx_shader - typecast a struct vmw_resource to a
135 * struct vmw_dx_shader
136 *
137 * @res: Pointer to the struct vmw_resource.
138 */
139static inline struct vmw_dx_shader *
140vmw_res_to_dx_shader(struct vmw_resource *res)
141{
142 return container_of(res, struct vmw_dx_shader, res);
143}
144
90static void vmw_hw_shader_destroy(struct vmw_resource *res) 145static void vmw_hw_shader_destroy(struct vmw_resource *res)
91{ 146{
92 (void) vmw_gb_shader_destroy(res); 147 if (likely(res->func->destroy))
148 (void) res->func->destroy(res);
149 else
150 res->id = -1;
93} 151}
94 152
153
95static int vmw_gb_shader_init(struct vmw_private *dev_priv, 154static int vmw_gb_shader_init(struct vmw_private *dev_priv,
96 struct vmw_resource *res, 155 struct vmw_resource *res,
97 uint32_t size, 156 uint32_t size,
98 uint64_t offset, 157 uint64_t offset,
99 SVGA3dShaderType type, 158 SVGA3dShaderType type,
159 uint8_t num_input_sig,
160 uint8_t num_output_sig,
100 struct vmw_dma_buffer *byte_code, 161 struct vmw_dma_buffer *byte_code,
101 void (*res_free) (struct vmw_resource *res)) 162 void (*res_free) (struct vmw_resource *res))
102{ 163{
103 struct vmw_shader *shader = vmw_res_to_shader(res); 164 struct vmw_shader *shader = vmw_res_to_shader(res);
104 int ret; 165 int ret;
105 166
106 ret = vmw_resource_init(dev_priv, res, true, 167 ret = vmw_resource_init(dev_priv, res, true, res_free,
107 res_free, &vmw_gb_shader_func); 168 &vmw_gb_shader_func);
108
109 169
110 if (unlikely(ret != 0)) { 170 if (unlikely(ret != 0)) {
111 if (res_free) 171 if (res_free)
@@ -122,11 +182,17 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
122 } 182 }
123 shader->size = size; 183 shader->size = size;
124 shader->type = type; 184 shader->type = type;
185 shader->num_input_sig = num_input_sig;
186 shader->num_output_sig = num_output_sig;
125 187
126 vmw_resource_activate(res, vmw_hw_shader_destroy); 188 vmw_resource_activate(res, vmw_hw_shader_destroy);
127 return 0; 189 return 0;
128} 190}
129 191
192/*
193 * GB shader code:
194 */
195
130static int vmw_gb_shader_create(struct vmw_resource *res) 196static int vmw_gb_shader_create(struct vmw_resource *res)
131{ 197{
132 struct vmw_private *dev_priv = res->dev_priv; 198 struct vmw_private *dev_priv = res->dev_priv;
@@ -165,7 +231,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
165 cmd->body.type = shader->type; 231 cmd->body.type = shader->type;
166 cmd->body.sizeInBytes = shader->size; 232 cmd->body.sizeInBytes = shader->size;
167 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 233 vmw_fifo_commit(dev_priv, sizeof(*cmd));
168 (void) vmw_3d_resource_inc(dev_priv, false); 234 vmw_fifo_resource_inc(dev_priv);
169 235
170 return 0; 236 return 0;
171 237
@@ -259,7 +325,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
259 return 0; 325 return 0;
260 326
261 mutex_lock(&dev_priv->binding_mutex); 327 mutex_lock(&dev_priv->binding_mutex);
262 vmw_context_binding_res_list_scrub(&res->binding_head); 328 vmw_binding_res_list_scrub(&res->binding_head);
263 329
264 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 330 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
265 if (unlikely(cmd == NULL)) { 331 if (unlikely(cmd == NULL)) {
@@ -275,12 +341,327 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
275 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 341 vmw_fifo_commit(dev_priv, sizeof(*cmd));
276 mutex_unlock(&dev_priv->binding_mutex); 342 mutex_unlock(&dev_priv->binding_mutex);
277 vmw_resource_release_id(res); 343 vmw_resource_release_id(res);
278 vmw_3d_resource_dec(dev_priv, false); 344 vmw_fifo_resource_dec(dev_priv);
345
346 return 0;
347}
348
349/*
350 * DX shader code:
351 */
352
353/**
354 * vmw_dx_shader_commit_notify - Notify that a shader operation has been
355 * committed to hardware from a user-supplied command stream.
356 *
357 * @res: Pointer to the shader resource.
358 * @state: Indicating whether a creation or removal has been committed.
359 *
360 */
361static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
362 enum vmw_cmdbuf_res_state state)
363{
364 struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
365 struct vmw_private *dev_priv = res->dev_priv;
366
367 if (state == VMW_CMDBUF_RES_ADD) {
368 mutex_lock(&dev_priv->binding_mutex);
369 vmw_cotable_add_resource(shader->cotable,
370 &shader->cotable_head);
371 shader->committed = true;
372 res->id = shader->id;
373 mutex_unlock(&dev_priv->binding_mutex);
374 } else {
375 mutex_lock(&dev_priv->binding_mutex);
376 list_del_init(&shader->cotable_head);
377 shader->committed = false;
378 res->id = -1;
379 mutex_unlock(&dev_priv->binding_mutex);
380 }
381}
382
383/**
384 * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader.
385 *
386 * @res: The shader resource
387 *
388 * This function reverts a scrub operation.
389 */
390static int vmw_dx_shader_unscrub(struct vmw_resource *res)
391{
392 struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
393 struct vmw_private *dev_priv = res->dev_priv;
394 struct {
395 SVGA3dCmdHeader header;
396 SVGA3dCmdDXBindShader body;
397 } *cmd;
398
399 if (!list_empty(&shader->cotable_head) || !shader->committed)
400 return 0;
401
402 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
403 shader->ctx->id);
404 if (unlikely(cmd == NULL)) {
405 DRM_ERROR("Failed reserving FIFO space for shader "
406 "binding.\n");
407 return -ENOMEM;
408 }
409
410 cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
411 cmd->header.size = sizeof(cmd->body);
412 cmd->body.cid = shader->ctx->id;
413 cmd->body.shid = shader->id;
414 cmd->body.mobid = res->backup->base.mem.start;
415 cmd->body.offsetInBytes = res->backup_offset;
416 vmw_fifo_commit(dev_priv, sizeof(*cmd));
417
418 vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
419
420 return 0;
421}
422
423/**
424 * vmw_dx_shader_create - The DX shader create callback
425 *
426 * @res: The DX shader resource
427 *
428 * The create callback is called as part of resource validation and
429 * makes sure that we unscrub the shader if it's previously been scrubbed.
430 */
431static int vmw_dx_shader_create(struct vmw_resource *res)
432{
433 struct vmw_private *dev_priv = res->dev_priv;
434 struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
435 int ret = 0;
436
437 WARN_ON_ONCE(!shader->committed);
438
439 if (!list_empty(&res->mob_head)) {
440 mutex_lock(&dev_priv->binding_mutex);
441 ret = vmw_dx_shader_unscrub(res);
442 mutex_unlock(&dev_priv->binding_mutex);
443 }
444
445 res->id = shader->id;
446 return ret;
447}
448
449/**
450 * vmw_dx_shader_bind - The DX shader bind callback
451 *
452 * @res: The DX shader resource
453 * @val_buf: Pointer to the validate buffer.
454 *
455 */
456static int vmw_dx_shader_bind(struct vmw_resource *res,
457 struct ttm_validate_buffer *val_buf)
458{
459 struct vmw_private *dev_priv = res->dev_priv;
460 struct ttm_buffer_object *bo = val_buf->bo;
461
462 BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
463 mutex_lock(&dev_priv->binding_mutex);
464 vmw_dx_shader_unscrub(res);
465 mutex_unlock(&dev_priv->binding_mutex);
466
467 return 0;
468}
469
470/**
471 * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader.
472 *
473 * @res: The shader resource
474 *
475 * This function unbinds a MOB from the DX shader without requiring the
476 * MOB dma_buffer to be reserved. The driver still considers the MOB bound.
477 * However, once the driver eventually decides to unbind the MOB, it doesn't
478 * need to access the context.
479 */
480static int vmw_dx_shader_scrub(struct vmw_resource *res)
481{
482 struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
483 struct vmw_private *dev_priv = res->dev_priv;
484 struct {
485 SVGA3dCmdHeader header;
486 SVGA3dCmdDXBindShader body;
487 } *cmd;
488
489 if (list_empty(&shader->cotable_head))
490 return 0;
491
492 WARN_ON_ONCE(!shader->committed);
493 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
494 if (unlikely(cmd == NULL)) {
495 DRM_ERROR("Failed reserving FIFO space for shader "
496 "scrubbing.\n");
497 return -ENOMEM;
498 }
499
500 cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
501 cmd->header.size = sizeof(cmd->body);
502 cmd->body.cid = shader->ctx->id;
503 cmd->body.shid = res->id;
504 cmd->body.mobid = SVGA3D_INVALID_ID;
505 cmd->body.offsetInBytes = 0;
506 vmw_fifo_commit(dev_priv, sizeof(*cmd));
507 res->id = -1;
508 list_del_init(&shader->cotable_head);
279 509
280 return 0; 510 return 0;
281} 511}
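
The scrub/unscrub pair above amounts to a small state machine: vmw_dx_shader_scrub() detaches the MOB at the device level while the driver still considers the shader committed, and vmw_dx_shader_unscrub() reattaches it on the next validation. A minimal userspace sketch of that invariant, with hypothetical names and the FIFO traffic reduced to flags:

#include <assert.h>
#include <stdbool.h>

/* Hypothetical model of the committed/attached invariant used above. */
struct model_shader {
	bool committed;   /* Created via the command stream. */
	bool attached;    /* MOB currently bound at the device level. */
};

static int model_scrub(struct model_shader *s)
{
	if (!s->attached)          /* list_empty(&cotable_head) analogue */
		return 0;
	s->attached = false;       /* Device forgets the MOB binding... */
	return 0;                  /* ...but s->committed stays true. */
}

static int model_unscrub(struct model_shader *s)
{
	if (s->attached || !s->committed)
		return 0;          /* Nothing to do or nothing to bind. */
	s->attached = true;        /* Reissue the bind command. */
	return 0;
}

int main(void)
{
	struct model_shader s = { .committed = true, .attached = true };

	model_scrub(&s);
	assert(s.committed && !s.attached);
	model_unscrub(&s);
	assert(s.committed && s.attached);
	return 0;
}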
282 512
283/** 513/**
514 * vmw_dx_shader_unbind - The dx shader unbind callback.
515 *
516 * @res: The shader resource
517 * @readback: Whether this is a readback unbind. Currently unused.
518 * @val_buf: MOB buffer information.
519 */
520static int vmw_dx_shader_unbind(struct vmw_resource *res,
521 bool readback,
522 struct ttm_validate_buffer *val_buf)
523{
524 struct vmw_private *dev_priv = res->dev_priv;
525 struct vmw_fence_obj *fence;
526 int ret;
527
528 BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
529
530 mutex_lock(&dev_priv->binding_mutex);
531 ret = vmw_dx_shader_scrub(res);
532 mutex_unlock(&dev_priv->binding_mutex);
533
534 if (ret)
535 return ret;
536
537 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
538 &fence, NULL);
539 vmw_fence_single_bo(val_buf->bo, fence);
540
541 if (likely(fence != NULL))
542 vmw_fence_obj_unreference(&fence);
543
544 return 0;
545}
546
547/**
548 * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for
549 * DX shaders.
550 *
551 * @dev_priv: Pointer to device private structure.
552 * @list: The list of cotable resources.
553 * @readback: Whether the call was part of a readback unbind.
554 *
555 * Scrubs all shader MOBs so that any subsequent shader unbind or shader
556 * destroy operation won't need to swap in the context.
557 */
558void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
559 struct list_head *list,
560 bool readback)
561{
562 struct vmw_dx_shader *entry, *next;
563
564 WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
565
566 list_for_each_entry_safe(entry, next, list, cotable_head) {
567 WARN_ON(vmw_dx_shader_scrub(&entry->res));
568 if (!readback)
569 entry->committed = false;
570 }
571}
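
vmw_dx_shader_cotable_list_scrub() relies on the _safe list iterator because vmw_dx_shader_scrub() unlinks the current entry via list_del_init() while the walk is in progress. A standalone sketch of why the next pointer must be cached up front; the list here is a hypothetical singly linked stand-in for the kernel's struct list_head:

#include <assert.h>
#include <stddef.h>

/* The _safe variant caches the next pointer so the current entry may be
 * unlinked mid-iteration, which is exactly what the scrub loop does. */
struct node { struct node *next; int scrubbed; };

int main(void)
{
	struct node c = { NULL, 0 }, b = { &c, 0 }, a = { &b, 0 };
	struct node *entry, *next;
	int visited = 0;

	for (entry = &a; entry; entry = next) {
		next = entry->next;   /* Cache before unlinking. */
		entry->next = NULL;   /* "list_del_init" analogue. */
		entry->scrubbed = 1;
		visited++;
	}
	assert(visited == 3 && a.scrubbed && b.scrubbed && c.scrubbed);
	return 0;
}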
572
573/**
574 * vmw_dx_shader_res_free - The DX shader free callback
575 *
576 * @res: The shader resource
577 *
578 * Frees the DX shader resource and updates memory accounting.
579 */
580static void vmw_dx_shader_res_free(struct vmw_resource *res)
581{
582 struct vmw_private *dev_priv = res->dev_priv;
583 struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
584
585 vmw_resource_unreference(&shader->cotable);
586 kfree(shader);
587 ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
588}
589
590/**
591 * vmw_dx_shader_add - Add a shader resource as a command buffer managed
592 * resource.
593 *
594 * @man: The command buffer resource manager.
595 * @ctx: Pointer to the context resource.
596 * @user_key: The id used for this shader.
597 * @shader_type: The shader type.
598 * @list: The list of staged command buffer managed resources.
599 */
600int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
601 struct vmw_resource *ctx,
602 u32 user_key,
603 SVGA3dShaderType shader_type,
604 struct list_head *list)
605{
606 struct vmw_dx_shader *shader;
607 struct vmw_resource *res;
608 struct vmw_private *dev_priv = ctx->dev_priv;
609 int ret;
610
611 if (!vmw_shader_dx_size)
612 vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
613
614 if (!vmw_shader_id_ok(user_key, shader_type))
615 return -EINVAL;
616
617 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
618 false, true);
619 if (ret) {
620 if (ret != -ERESTARTSYS)
621 DRM_ERROR("Out of graphics memory for shader "
622 "creation.\n");
623 return ret;
624 }
625
626 shader = kmalloc(sizeof(*shader), GFP_KERNEL);
627 if (!shader) {
628 ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
629 return -ENOMEM;
630 }
631
632 res = &shader->res;
633 shader->ctx = ctx;
634 shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
635 shader->id = user_key;
636 shader->committed = false;
637 INIT_LIST_HEAD(&shader->cotable_head);
638 ret = vmw_resource_init(dev_priv, res, true,
639 vmw_dx_shader_res_free, &vmw_dx_shader_func);
640 if (ret)
641 goto out_resource_init;
642
643 /*
644 * The user_key name-space is not per shader type for DX shaders,
645 * so when hashing, use a single zero shader type.
646 */
647 ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
648 vmw_shader_key(user_key, 0),
649 res, list);
650 if (ret)
651 goto out_resource_init;
652
653 res->id = shader->id;
654 vmw_resource_activate(res, vmw_hw_shader_destroy);
655
656out_resource_init:
657 vmw_resource_unreference(&res);
658
659 return ret;
660}
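
Note the reference-handling idiom in vmw_dx_shader_add(): once vmw_resource_init() succeeds, the res_free callback owns the memory, and the local reference is dropped at out_resource_init on both the success and the error path, because on success the command buffer manager holds its own reference. A hypothetical refcount model of that handoff:

#include <assert.h>

/* Toy model of the "init, hand off, always unreference" idiom above. */
struct obj { int refs; };

static void get(struct obj *o) { o->refs++; }
static void put(struct obj *o) { assert(o->refs > 0); o->refs--; }

static int add(struct obj *o, int manager_accepts)
{
	int ret = 0;

	o->refs = 1;               /* vmw_resource_init(): caller's ref. */
	if (manager_accepts)
		get(o);            /* vmw_cmdbuf_res_add(): manager's ref. */
	else
		ret = -1;
	put(o);                    /* out_resource_init: drop the caller's
				    * ref on success *and* failure. */
	return ret;
}

int main(void)
{
	struct obj o;

	assert(add(&o, 1) == 0 && o.refs == 1);  /* Manager keeps it alive. */
	assert(add(&o, 0) <  0 && o.refs == 0);  /* Error path frees it. */
	return 0;
}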
661
662
663
664/**
284 * User-space shader management: 665 * User-space shader management:
285 */ 666 */
286 667
@@ -341,6 +722,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
341 size_t shader_size, 722 size_t shader_size,
342 size_t offset, 723 size_t offset,
343 SVGA3dShaderType shader_type, 724 SVGA3dShaderType shader_type,
725 uint8_t num_input_sig,
726 uint8_t num_output_sig,
344 struct ttm_object_file *tfile, 727 struct ttm_object_file *tfile,
345 u32 *handle) 728 u32 *handle)
346{ 729{
@@ -383,7 +766,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
383 */ 766 */
384 767
385 ret = vmw_gb_shader_init(dev_priv, res, shader_size, 768 ret = vmw_gb_shader_init(dev_priv, res, shader_size,
386 offset, shader_type, buffer, 769 offset, shader_type, num_input_sig,
770 num_output_sig, buffer,
387 vmw_user_shader_free); 771 vmw_user_shader_free);
388 if (unlikely(ret != 0)) 772 if (unlikely(ret != 0))
389 goto out; 773 goto out;
@@ -407,11 +791,11 @@ out:
407} 791}
408 792
409 793
410struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, 794static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
411 struct vmw_dma_buffer *buffer, 795 struct vmw_dma_buffer *buffer,
412 size_t shader_size, 796 size_t shader_size,
413 size_t offset, 797 size_t offset,
414 SVGA3dShaderType shader_type) 798 SVGA3dShaderType shader_type)
415{ 799{
416 struct vmw_shader *shader; 800 struct vmw_shader *shader;
417 struct vmw_resource *res; 801 struct vmw_resource *res;
@@ -449,7 +833,7 @@ struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
449 * From here on, the destructor takes over resource freeing. 833 * From here on, the destructor takes over resource freeing.
450 */ 834 */
451 ret = vmw_gb_shader_init(dev_priv, res, shader_size, 835 ret = vmw_gb_shader_init(dev_priv, res, shader_size,
452 offset, shader_type, buffer, 836 offset, shader_type, 0, 0, buffer,
453 vmw_shader_free); 837 vmw_shader_free);
454 838
455out_err: 839out_err:
@@ -457,19 +841,20 @@ out_err:
457} 841}
458 842
459 843
460int vmw_shader_define_ioctl(struct drm_device *dev, void *data, 844static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
461 struct drm_file *file_priv) 845 enum drm_vmw_shader_type shader_type_drm,
846 u32 buffer_handle, size_t size, size_t offset,
847 uint8_t num_input_sig, uint8_t num_output_sig,
848 uint32_t *shader_handle)
462{ 849{
463 struct vmw_private *dev_priv = vmw_priv(dev); 850 struct vmw_private *dev_priv = vmw_priv(dev);
464 struct drm_vmw_shader_create_arg *arg =
465 (struct drm_vmw_shader_create_arg *)data;
466 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 851 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
467 struct vmw_dma_buffer *buffer = NULL; 852 struct vmw_dma_buffer *buffer = NULL;
468 SVGA3dShaderType shader_type; 853 SVGA3dShaderType shader_type;
469 int ret; 854 int ret;
470 855
471 if (arg->buffer_handle != SVGA3D_INVALID_ID) { 856 if (buffer_handle != SVGA3D_INVALID_ID) {
472 ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle, 857 ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
473 &buffer); 858 &buffer);
474 if (unlikely(ret != 0)) { 859 if (unlikely(ret != 0)) {
475 DRM_ERROR("Could not find buffer for shader " 860 DRM_ERROR("Could not find buffer for shader "
@@ -478,23 +863,20 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
478 } 863 }
479 864
480 if ((u64)buffer->base.num_pages * PAGE_SIZE < 865 if ((u64)buffer->base.num_pages * PAGE_SIZE <
481 (u64)arg->size + (u64)arg->offset) { 866 (u64)size + (u64)offset) {
482 DRM_ERROR("Illegal buffer- or shader size.\n"); 867 DRM_ERROR("Illegal buffer- or shader size.\n");
483 ret = -EINVAL; 868 ret = -EINVAL;
484 goto out_bad_arg; 869 goto out_bad_arg;
485 } 870 }
486 } 871 }
487 872
488 switch (arg->shader_type) { 873 switch (shader_type_drm) {
489 case drm_vmw_shader_type_vs: 874 case drm_vmw_shader_type_vs:
490 shader_type = SVGA3D_SHADERTYPE_VS; 875 shader_type = SVGA3D_SHADERTYPE_VS;
491 break; 876 break;
492 case drm_vmw_shader_type_ps: 877 case drm_vmw_shader_type_ps:
493 shader_type = SVGA3D_SHADERTYPE_PS; 878 shader_type = SVGA3D_SHADERTYPE_PS;
494 break; 879 break;
495 case drm_vmw_shader_type_gs:
496 shader_type = SVGA3D_SHADERTYPE_GS;
497 break;
498 default: 880 default:
499 DRM_ERROR("Illegal shader type.\n"); 881 DRM_ERROR("Illegal shader type.\n");
500 ret = -EINVAL; 882 ret = -EINVAL;
@@ -505,8 +887,9 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
505 if (unlikely(ret != 0)) 887 if (unlikely(ret != 0))
506 goto out_bad_arg; 888 goto out_bad_arg;
507 889
508 ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset, 890 ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
509 shader_type, tfile, &arg->shader_handle); 891 shader_type, num_input_sig,
892 num_output_sig, tfile, shader_handle);
510 893
511 ttm_read_unlock(&dev_priv->reservation_sem); 894 ttm_read_unlock(&dev_priv->reservation_sem);
512out_bad_arg: 895out_bad_arg:
@@ -515,7 +898,7 @@ out_bad_arg:
515} 898}
516 899
517/** 900/**
518 * vmw_compat_shader_id_ok - Check whether a compat shader user key and 901 * vmw_shader_id_ok - Check whether a compat shader user key and
519 * shader type are within valid bounds. 902 * shader type are within valid bounds.
520 * 903 *
521 * @user_key: User space id of the shader. 904 * @user_key: User space id of the shader.
@@ -523,13 +906,13 @@ out_bad_arg:
523 * 906 *
524 * Returns true if valid, false if not. 907
525 */ 908 */
526static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type) 909static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
527{ 910{
528 return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16; 911 return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
529} 912}
530 913
531/** 914/**
532 * vmw_compat_shader_key - Compute a hash key suitable for a compat shader. 915 * vmw_shader_key - Compute a hash key suitable for a compat shader.
533 * 916 *
534 * @user_key: User space id of the shader. 917 * @user_key: User space id of the shader.
535 * @shader_type: Shader type. 918 * @shader_type: Shader type.
@@ -537,13 +920,13 @@ static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
537 * Returns a hash key suitable for a command buffer managed resource 920 * Returns a hash key suitable for a command buffer managed resource
538 * manager hash table. 921 * manager hash table.
539 */ 922 */
540static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type) 923static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type)
541{ 924{
542 return user_key | (shader_type << 20); 925 return user_key | (shader_type << 20);
543} 926}
544 927
545/** 928/**
546 * vmw_compat_shader_remove - Stage a compat shader for removal. 929 * vmw_shader_remove - Stage a compat shader for removal.
547 * 930 *
548 * @man: Pointer to the compat shader manager identifying the shader namespace. 931 * @man: Pointer to the compat shader manager identifying the shader namespace.
549 * @user_key: The key that is used to identify the shader. The key is 932 * @user_key: The key that is used to identify the shader. The key is
@@ -551,17 +934,18 @@ static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
551 * @shader_type: Shader type. 934 * @shader_type: Shader type.
552 * @list: Caller's list of staged command buffer resource actions. 935 * @list: Caller's list of staged command buffer resource actions.
553 */ 936 */
554int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man, 937int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
555 u32 user_key, SVGA3dShaderType shader_type, 938 u32 user_key, SVGA3dShaderType shader_type,
556 struct list_head *list) 939 struct list_head *list)
557{ 940{
558 if (!vmw_compat_shader_id_ok(user_key, shader_type)) 941 struct vmw_resource *dummy;
942
943 if (!vmw_shader_id_ok(user_key, shader_type))
559 return -EINVAL; 944 return -EINVAL;
560 945
561 return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader, 946 return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader,
562 vmw_compat_shader_key(user_key, 947 vmw_shader_key(user_key, shader_type),
563 shader_type), 948 list, &dummy);
564 list);
565} 949}
566 950
567/** 951/**
@@ -591,7 +975,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
591 int ret; 975 int ret;
592 struct vmw_resource *res; 976 struct vmw_resource *res;
593 977
594 if (!vmw_compat_shader_id_ok(user_key, shader_type)) 978 if (!vmw_shader_id_ok(user_key, shader_type))
595 return -EINVAL; 979 return -EINVAL;
596 980
597 /* Allocate and pin a DMA buffer */ 981 /* Allocate and pin a DMA buffer */
@@ -628,8 +1012,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
628 if (unlikely(ret != 0)) 1012 if (unlikely(ret != 0))
629 goto no_reserve; 1013 goto no_reserve;
630 1014
631 ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader, 1015 ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
632 vmw_compat_shader_key(user_key, shader_type), 1016 vmw_shader_key(user_key, shader_type),
633 res, list); 1017 res, list);
634 vmw_resource_unreference(&res); 1018 vmw_resource_unreference(&res);
635no_reserve: 1019no_reserve:
@@ -639,7 +1023,7 @@ out:
639} 1023}
640 1024
641/** 1025/**
642 * vmw_compat_shader_lookup - Look up a compat shader 1026 * vmw_shader_lookup - Look up a compat shader
643 * 1027 *
644 * @man: Pointer to the command buffer managed resource manager identifying 1028 * @man: Pointer to the command buffer managed resource manager identifying
645 * the shader namespace. 1029 * the shader namespace.
@@ -650,14 +1034,26 @@ out:
650 * found. An error pointer otherwise. 1034 * found. An error pointer otherwise.
651 */ 1035 */
652struct vmw_resource * 1036struct vmw_resource *
653vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man, 1037vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
654 u32 user_key, 1038 u32 user_key,
655 SVGA3dShaderType shader_type) 1039 SVGA3dShaderType shader_type)
656{ 1040{
657 if (!vmw_compat_shader_id_ok(user_key, shader_type)) 1041 if (!vmw_shader_id_ok(user_key, shader_type))
658 return ERR_PTR(-EINVAL); 1042 return ERR_PTR(-EINVAL);
659 1043
660 return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader, 1044 return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
661 vmw_compat_shader_key(user_key, 1045 vmw_shader_key(user_key, shader_type));
662 shader_type)); 1046}
1047
1048int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
1049 struct drm_file *file_priv)
1050{
1051 struct drm_vmw_shader_create_arg *arg =
1052 (struct drm_vmw_shader_create_arg *)data;
1053
1054 return vmw_shader_define(dev, file_priv, arg->shader_type,
1055 arg->buffer_handle,
1056 arg->size, arg->offset,
1057 0, 0,
1058 &arg->shader_handle);
663} 1059}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
new file mode 100644
index 000000000000..5a73eebd0f35
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -0,0 +1,555 @@
1/**************************************************************************
2 * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 **************************************************************************/
26
27#include "vmwgfx_drv.h"
28#include "vmwgfx_resource_priv.h"
29#include "vmwgfx_so.h"
30#include "vmwgfx_binding.h"
31
32/*
33 * Currently, the only reason we need to keep track of views is that if we
34 * destroy a hardware surface, all views pointing to it must also be destroyed,
35 * otherwise the device will error.
36 * So in particular, if a surface is evicted, we must destroy all views pointing
37 * to it, and all context bindings of that view. Similarly we must restore
38 * the view bindings, views and surfaces pointed to by the views when a
39 * context is referenced in the command stream.
40 */
41
42/**
43 * struct vmw_view - view metadata
44 *
45 * @res: The struct vmw_resource we derive from
46 * @ctx: Non-refcounted pointer to the context this view belongs to.
47 * @srf: Refcounted pointer to the surface pointed to by this view.
48 * @cotable: Refcounted pointer to the cotable holding this view.
49 * @srf_head: List head for the surface-to-view list.
50 * @cotable_head: List head for the cotable-to-view list.
51 * @view_type: View type.
52 * @view_id: User-space per context view id. Currently used also as per
53 * context device view id.
54 * @cmd_size: Size of the SVGA3D define view command that we've copied from the
55 * command stream.
56 * @committed: Whether the view is actually created or pending creation at the
57 * device level.
58 * @cmd: The SVGA3D define view command copied from the command stream.
59 */
60struct vmw_view {
61 struct rcu_head rcu;
62 struct vmw_resource res;
63 struct vmw_resource *ctx; /* Immutable */
64 struct vmw_resource *srf; /* Immutable */
65 struct vmw_resource *cotable; /* Immutable */
66 struct list_head srf_head; /* Protected by binding_mutex */
67 struct list_head cotable_head; /* Protected by binding_mutex */
68 unsigned view_type; /* Immutable */
69 unsigned view_id; /* Immutable */
70 u32 cmd_size; /* Immutable */
71 bool committed; /* Protected by binding_mutex */
72 u32 cmd[1]; /* Immutable */
73};
74
75static int vmw_view_create(struct vmw_resource *res);
76static int vmw_view_destroy(struct vmw_resource *res);
77static void vmw_hw_view_destroy(struct vmw_resource *res);
78static void vmw_view_commit_notify(struct vmw_resource *res,
79 enum vmw_cmdbuf_res_state state);
80
81static const struct vmw_res_func vmw_view_func = {
82 .res_type = vmw_res_view,
83 .needs_backup = false,
84 .may_evict = false,
85 .type_name = "DX view",
86 .backup_placement = NULL,
87 .create = vmw_view_create,
88 .commit_notify = vmw_view_commit_notify,
89};
90
91/**
92 * struct vmw_view_define - view define command body stub
93 *
94 * @view_id: The device id of the view being defined
95 * @sid: The surface id of the view being defined
96 *
97 * This generic struct is used by the code to change @view_id and @sid of a
98 * saved view define command.
99 */
100struct vmw_view_define {
101 uint32 view_id;
102 uint32 sid;
103};
104
105/**
106 * vmw_view - Convert a struct vmw_resource to a struct vmw_view
107 *
108 * @res: Pointer to the resource to convert.
109 *
110 * Returns a pointer to a struct vmw_view.
111 */
112static struct vmw_view *vmw_view(struct vmw_resource *res)
113{
114 return container_of(res, struct vmw_view, res);
115}
116
117/**
118 * vmw_view_commit_notify - Notify that a view operation has been committed to
119 * hardware from a user-supplied command stream.
120 *
121 * @res: Pointer to the view resource.
122 * @state: Indicating whether a creation or removal has been committed.
123 *
124 */
125static void vmw_view_commit_notify(struct vmw_resource *res,
126 enum vmw_cmdbuf_res_state state)
127{
128 struct vmw_view *view = vmw_view(res);
129 struct vmw_private *dev_priv = res->dev_priv;
130
131 mutex_lock(&dev_priv->binding_mutex);
132 if (state == VMW_CMDBUF_RES_ADD) {
133 struct vmw_surface *srf = vmw_res_to_srf(view->srf);
134
135 list_add_tail(&view->srf_head, &srf->view_list);
136 vmw_cotable_add_resource(view->cotable, &view->cotable_head);
137 view->committed = true;
138 res->id = view->view_id;
139
140 } else {
141 list_del_init(&view->cotable_head);
142 list_del_init(&view->srf_head);
143 view->committed = false;
144 res->id = -1;
145 }
146 mutex_unlock(&dev_priv->binding_mutex);
147}
148
149/**
150 * vmw_view_create - Create a hardware view.
151 *
152 * @res: Pointer to the view resource.
153 *
154 * Create a hardware view. Typically used if that view has previously been
155 * destroyed by an eviction operation.
156 */
157static int vmw_view_create(struct vmw_resource *res)
158{
159 struct vmw_view *view = vmw_view(res);
160 struct vmw_surface *srf = vmw_res_to_srf(view->srf);
161 struct vmw_private *dev_priv = res->dev_priv;
162 struct {
163 SVGA3dCmdHeader header;
164 struct vmw_view_define body;
165 } *cmd;
166
167 mutex_lock(&dev_priv->binding_mutex);
168 if (!view->committed) {
169 mutex_unlock(&dev_priv->binding_mutex);
170 return 0;
171 }
172
173 cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size,
174 view->ctx->id);
175 if (!cmd) {
176 DRM_ERROR("Failed reserving FIFO space for view creation.\n");
177 mutex_unlock(&dev_priv->binding_mutex);
178 return -ENOMEM;
179 }
180 memcpy(cmd, &view->cmd, view->cmd_size);
181 WARN_ON(cmd->body.view_id != view->view_id);
182 /* Sid may have changed due to surface eviction. */
183 WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
184 cmd->body.sid = view->srf->id;
185 vmw_fifo_commit(res->dev_priv, view->cmd_size);
186 res->id = view->view_id;
187 list_add_tail(&view->srf_head, &srf->view_list);
188 vmw_cotable_add_resource(view->cotable, &view->cotable_head);
189 mutex_unlock(&dev_priv->binding_mutex);
190
191 return 0;
192}
193
194/**
195 * vmw_view_destroy - Destroy a hardware view.
196 *
197 * @res: Pointer to the view resource.
198 *
199 * Destroy a hardware view. Typically used on unexpected termination of the
200 * owning process or if the surface the view is pointing to is destroyed.
201 */
202static int vmw_view_destroy(struct vmw_resource *res)
203{
204 struct vmw_private *dev_priv = res->dev_priv;
205 struct vmw_view *view = vmw_view(res);
206 struct {
207 SVGA3dCmdHeader header;
208 union vmw_view_destroy body;
209 } *cmd;
210
211 WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
212 vmw_binding_res_list_scrub(&res->binding_head);
213
214 if (!view->committed || res->id == -1)
215 return 0;
216
217 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id);
218 if (!cmd) {
219 DRM_ERROR("Failed reserving FIFO space for view "
220 "destruction.\n");
221 return -ENOMEM;
222 }
223
224 cmd->header.id = vmw_view_destroy_cmds[view->view_type];
225 cmd->header.size = sizeof(cmd->body);
226 cmd->body.view_id = view->view_id;
227 vmw_fifo_commit(dev_priv, sizeof(*cmd));
228 res->id = -1;
229 list_del_init(&view->cotable_head);
230 list_del_init(&view->srf_head);
231
232 return 0;
233}
234
235/**
236 * vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup.
237 *
238 * @res: Pointer to the view resource.
239 *
240 * Destroy a hardware view if it's still present.
241 */
242static void vmw_hw_view_destroy(struct vmw_resource *res)
243{
244 struct vmw_private *dev_priv = res->dev_priv;
245
246 mutex_lock(&dev_priv->binding_mutex);
247 WARN_ON(vmw_view_destroy(res));
248 res->id = -1;
249 mutex_unlock(&dev_priv->binding_mutex);
250}
251
252/**
253 * vmw_view_key - Compute a view key suitable for the cmdbuf resource manager
254 *
255 * @user_key: The user-space id used for the view.
256 * @view_type: The view type.
257 *
258 * Returns a hash key suitable for the command buffer managed resource manager hash table.
259 */
260static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type)
261{
262 return user_key | (view_type << 20);
263}
264
265/**
266 * vmw_view_id_ok - Basic view id and type range checks.
267 *
268 * @user_key: The user-space id used for the view.
269 * @view_type: The view type.
270 *
271 * Checks that the view id and type (typically provided by user-space) are
272 * valid.
273 */
274static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
275{
276 return (user_key < SVGA_COTABLE_MAX_IDS &&
277 view_type < vmw_view_max);
278}
279
280/**
281 * vmw_view_res_free - resource res_free callback for view resources
282 *
283 * @res: Pointer to a struct vmw_resource
284 *
285 * Frees the memory held by a struct vmw_view and updates memory accounting.
286 */
287static void vmw_view_res_free(struct vmw_resource *res)
288{
289 struct vmw_view *view = vmw_view(res);
290 size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
291 struct vmw_private *dev_priv = res->dev_priv;
292
293 vmw_resource_unreference(&view->cotable);
294 vmw_resource_unreference(&view->srf);
295 kfree_rcu(view, rcu);
296 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
297}
298
299/**
300 * vmw_view_add - Create a view resource and stage it for addition
301 * as a command buffer managed resource.
302 *
303 * @man: Pointer to the command buffer resource manager identifying the view namespace.
304 * @ctx: Pointer to a struct vmw_resource identifying the active context.
305 * @srf: Pointer to a struct vmw_resource identifying the surface the view
306 * points to.
307 * @view_type: The view type deduced from the view create command.
308 * @user_key: The key that is used to identify the view. The key is
309 * unique to the view type and to the context.
310 * @cmd: Pointer to the view create command in the command stream.
311 * @cmd_size: Size of the view create command in the command stream.
312 * @list: Caller's list of staged command buffer resource actions.
313 */
314int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
315 struct vmw_resource *ctx,
316 struct vmw_resource *srf,
317 enum vmw_view_type view_type,
318 u32 user_key,
319 const void *cmd,
320 size_t cmd_size,
321 struct list_head *list)
322{
323 static const size_t vmw_view_define_sizes[] = {
324 [vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
325 [vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
326 [vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView)
327 };
328
329 struct vmw_private *dev_priv = ctx->dev_priv;
330 struct vmw_resource *res;
331 struct vmw_view *view;
332 size_t size;
333 int ret;
334
335 if (cmd_size != vmw_view_define_sizes[view_type] +
336 sizeof(SVGA3dCmdHeader)) {
337 DRM_ERROR("Illegal view create command size.\n");
338 return -EINVAL;
339 }
340
341 if (!vmw_view_id_ok(user_key, view_type)) {
342 DRM_ERROR("Illegal view add view id.\n");
343 return -EINVAL;
344 }
345
346 size = offsetof(struct vmw_view, cmd) + cmd_size;
347
348 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true);
349 if (ret) {
350 if (ret != -ERESTARTSYS)
351 DRM_ERROR("Out of graphics memory for view"
352 " creation.\n");
353 return ret;
354 }
355
356 view = kmalloc(size, GFP_KERNEL);
357 if (!view) {
358 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
359 return -ENOMEM;
360 }
361
362 res = &view->res;
363 view->ctx = ctx;
364 view->srf = vmw_resource_reference(srf);
365 view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]);
366 view->view_type = view_type;
367 view->view_id = user_key;
368 view->cmd_size = cmd_size;
369 view->committed = false;
370 INIT_LIST_HEAD(&view->srf_head);
371 INIT_LIST_HEAD(&view->cotable_head);
372 memcpy(&view->cmd, cmd, cmd_size);
373 ret = vmw_resource_init(dev_priv, res, true,
374 vmw_view_res_free, &vmw_view_func);
375 if (ret)
376 goto out_resource_init;
377
378 ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view,
379 vmw_view_key(user_key, view_type),
380 res, list);
381 if (ret)
382 goto out_resource_init;
383
384 res->id = view->view_id;
385 vmw_resource_activate(res, vmw_hw_view_destroy);
386
387out_resource_init:
388 vmw_resource_unreference(&res);
389
390 return ret;
391}
392
393/**
394 * vmw_view_remove - Stage a view for removal.
395 *
396 * @man: Pointer to the view manager identifying the view namespace.
397 * @user_key: The key that is used to identify the view. The key is
398 * unique to the view type.
399 * @view_type: View type
400 * @list: Caller's list of staged command buffer resource actions.
401 * @res_p: If the resource is in an already committed state, points to the
402 * struct vmw_resource on successful return. The pointer will be
403 * non ref-counted.
404 */
405int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
406 u32 user_key, enum vmw_view_type view_type,
407 struct list_head *list,
408 struct vmw_resource **res_p)
409{
410 if (!vmw_view_id_ok(user_key, view_type)) {
411 DRM_ERROR("Illegal view remove view id.\n");
412 return -EINVAL;
413 }
414
415 return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view,
416 vmw_view_key(user_key, view_type),
417 list, res_p);
418}
419
420/**
421 * vmw_view_cotable_list_destroy - Evict all views belonging to a cotable.
422 *
423 * @dev_priv: Pointer to a device private struct.
424 * @list: List of views belonging to a cotable.
425 * @readback: Unused. Needed for function interface only.
426 *
427 * This function evicts all views belonging to a cotable.
428 * It must be called with the binding_mutex held, and the caller must hold
429 * a reference to the view resource. This is typically called before the
430 * cotable is paged out.
431 */
432void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
433 struct list_head *list,
434 bool readback)
435{
436 struct vmw_view *entry, *next;
437
438 WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
439
440 list_for_each_entry_safe(entry, next, list, cotable_head)
441 WARN_ON(vmw_view_destroy(&entry->res));
442}
443
444/**
445 * vmw_view_surface_list_destroy - Evict all views pointing to a surface
446 *
447 * @dev_priv: Pointer to a device private struct.
448 * @list: List of views pointing to a surface.
449 *
450 * This function evicts all views pointing to a surface. This is typically
451 * called before the surface is evicted.
452 */
453void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
454 struct list_head *list)
455{
456 struct vmw_view *entry, *next;
457
458 WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
459
460 list_for_each_entry_safe(entry, next, list, srf_head)
461 WARN_ON(vmw_view_destroy(&entry->res));
462}
463
464/**
465 * vmw_view_srf - Return a non-refcounted pointer to the surface a view is
466 * pointing to.
467 *
468 * @res: pointer to a view resource.
469 *
470 * Note that the view itself is holding a reference, so as long as
471 * the view resource is alive, the surface resource will be.
472 */
473struct vmw_resource *vmw_view_srf(struct vmw_resource *res)
474{
475 return vmw_view(res)->srf;
476}
477
478/**
479 * vmw_view_lookup - Look up a view.
480 *
481 * @man: The context's cmdbuf ref manager.
482 * @view_type: The view type.
483 * @user_key: The view user id.
484 *
485 * returns a refcounted pointer to a view or an error pointer if not found.
486 */
487struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
488 enum vmw_view_type view_type,
489 u32 user_key)
490{
491 return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view,
492 vmw_view_key(user_key, view_type));
493}
494
495const u32 vmw_view_destroy_cmds[] = {
496 [vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
497 [vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
498 [vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
499};
500
501const SVGACOTableType vmw_view_cotables[] = {
502 [vmw_view_sr] = SVGA_COTABLE_SRVIEW,
503 [vmw_view_rt] = SVGA_COTABLE_RTVIEW,
504 [vmw_view_ds] = SVGA_COTABLE_DSVIEW,
505};
506
507const SVGACOTableType vmw_so_cotables[] = {
508 [vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT,
509 [vmw_so_bs] = SVGA_COTABLE_BLENDSTATE,
510 [vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL,
511 [vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE,
512 [vmw_so_ss] = SVGA_COTABLE_SAMPLER,
513 [vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT
514};
515
516
517/* To remove unused function warning */
518static void vmw_so_build_asserts(void) __attribute__((used));
519
520
521/*
522 * This function is unused at run-time, and only used to dump various build
523 * asserts important for code optimization assumptions.
524 */
525static void vmw_so_build_asserts(void)
526{
527 /* Assert that our vmw_view_cmd_to_type() function is correct. */
528 BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW !=
529 SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1);
530 BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW !=
531 SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2);
532 BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW !=
533 SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3);
534 BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW !=
535 SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4);
536 BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW !=
537 SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5);
538
539 /* Assert that our "one body fits all" assumption is valid */
540 BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32));
541
542 /* Assert that the view key space can hold all view ids. */
543 BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1));
544
545 /*
546 * Assert that the offset of sid in all view define commands
547 * is what we assume it to be.
548 */
549 BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
550 offsetof(SVGA3dCmdDXDefineShaderResourceView, sid));
551 BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
552 offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
553 BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
554 offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
555}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
new file mode 100644
index 000000000000..268738387b5e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -0,0 +1,160 @@
1/**************************************************************************
2 * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 **************************************************************************/
26#ifndef VMW_SO_H
27#define VMW_SO_H
28
29enum vmw_view_type {
30 vmw_view_sr,
31 vmw_view_rt,
32 vmw_view_ds,
33 vmw_view_max,
34};
35
36enum vmw_so_type {
37 vmw_so_el,
38 vmw_so_bs,
39 vmw_so_ds,
40 vmw_so_rs,
41 vmw_so_ss,
42 vmw_so_so,
43 vmw_so_max,
44};
45
46/**
47 * union vmw_view_destroy - view destruction command body
48 *
49 * @rtv: RenderTarget view destruction command body
50 * @srv: ShaderResource view destruction command body
51 * @dsv: DepthStencil view destruction command body
52 * @view_id: A single u32 view id.
53 *
54 * The assumption here is that all union members are really represented by a
55 * single u32 in the command stream. If that's not the case,
56 * the size of this union will not equal the size of an u32, and the
57 * assumption is invalid, and we detect that at compile time in the
58 * vmw_so_build_asserts() function.
59 */
60union vmw_view_destroy {
61 struct SVGA3dCmdDXDestroyRenderTargetView rtv;
62 struct SVGA3dCmdDXDestroyShaderResourceView srv;
63 struct SVGA3dCmdDXDestroyDepthStencilView dsv;
64 u32 view_id;
65};
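
The union works because each destroy body is, in practice, a single 32-bit view id, so writing view_id patches whichever member the command actually is. A userspace restatement of that assumption, with stand-in struct names; the in-kernel check is the BUILD_BUG_ON() in vmw_so_build_asserts():

#include <assert.h>
#include <stdint.h>

struct rtv { uint32_t view_id; };
struct srv { uint32_t view_id; };
struct dsv { uint32_t view_id; };

union view_destroy {
	struct rtv rtv;
	struct srv srv;
	struct dsv dsv;
	uint32_t view_id;
};

int main(void)
{
	union view_destroy body;

	/* "One body fits all": the union is exactly one u32 wide. */
	assert(sizeof(body) == sizeof(uint32_t));
	body.view_id = 42;
	assert(body.rtv.view_id == 42 && body.dsv.view_id == 42);
	return 0;
}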
66
67/* Map enum vmw_view_type to view destroy command ids */
68extern const u32 vmw_view_destroy_cmds[];
69
70/* Map enum vmw_view_type to SVGACOTableType */
71extern const SVGACOTableType vmw_view_cotables[];
72
73/* Map enum vmw_so_type to SVGACOTableType */
74extern const SVGACOTableType vmw_so_cotables[];
75
76/*
77 * vmw_view_cmd_to_type - Return the view type for a create or destroy command
78 *
79 * @id: The SVGA3D command id.
80 *
81 * For a given view create or destroy command id, return the corresponding
82 * enum vmw_view_type. If the command is unknown, return vmw_view_max.
83 * The validity of the simplified calculation is verified in the
84 * vmw_so_build_asserts() function.
85 */
86static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
87{
88 u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;
89
90 if (tmp > (u32)vmw_view_max)
91 return vmw_view_max;
92
93 return (enum vmw_view_type) tmp;
94}
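
The division by two works only because the device lays out the command ids in define/destroy pairs starting at the shader-resource define id, which vmw_so_build_asserts() verifies at compile time. A toy demonstration with made-up command ids:

#include <assert.h>
#include <stdint.h>

/* The ids below are fictitious; the real requirement is only that each
 * destroy id directly follows its define id. */
enum { CMD_BASE = 1000,
       CMD_DEFINE_SR = CMD_BASE, CMD_DESTROY_SR,
       CMD_DEFINE_RT,            CMD_DESTROY_RT,
       CMD_DEFINE_DS,            CMD_DESTROY_DS };

enum view_type { view_sr, view_rt, view_ds, view_max };

static enum view_type cmd_to_type(uint32_t id)
{
	uint32_t tmp = (id - CMD_BASE) / 2;

	return tmp >= view_max ? view_max : (enum view_type)tmp;
}

int main(void)
{
	assert(cmd_to_type(CMD_DEFINE_SR) == view_sr);
	assert(cmd_to_type(CMD_DESTROY_SR) == view_sr);
	assert(cmd_to_type(CMD_DEFINE_DS) == view_ds);
	assert(cmd_to_type(CMD_DESTROY_DS) == view_ds);
	assert(cmd_to_type(CMD_DESTROY_DS + 1) == view_max); /* unknown */
	return 0;
}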
95
96/*
97 * vmw_so_cmd_to_type - Return the state object type for a
98 * create or destroy command
99 *
100 * @id: The SVGA3D command id.
101 *
102 * For a given state object create or destroy command id,
103 * return the corresponding enum vmw_so_type. If the command is unknown,
104 * return vmw_so_max. We should perhaps optimize this function using
105 * a similar strategy as vmw_view_cmd_to_type().
106 */
107static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
108{
109 switch (id) {
110 case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT:
111 case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT:
112 return vmw_so_el;
113 case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE:
114 case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE:
115 return vmw_so_bs;
116 case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE:
117 case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE:
118 return vmw_so_ds;
119 case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE:
120 case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE:
121 return vmw_so_rs;
122 case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE:
123 case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE:
124 return vmw_so_ss;
125 case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT:
126 case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT:
127 return vmw_so_so;
128 default:
129 break;
130 }
131 return vmw_so_max;
132}
133
134/*
135 * View management - vmwgfx_so.c
136 */
137extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
138 struct vmw_resource *ctx,
139 struct vmw_resource *srf,
140 enum vmw_view_type view_type,
141 u32 user_key,
142 const void *cmd,
143 size_t cmd_size,
144 struct list_head *list);
145
146extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
147 u32 user_key, enum vmw_view_type view_type,
148 struct list_head *list,
149 struct vmw_resource **res_p);
150
151extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
152 struct list_head *view_list);
153extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
154 struct list_head *list,
155 bool readback);
156extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res);
157extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
158 enum vmw_view_type view_type,
159 u32 user_key);
160#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
new file mode 100644
index 000000000000..c22e2df1b336
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -0,0 +1,1266 @@
1/******************************************************************************
2 *
3 * COPYRIGHT © 2014-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 ******************************************************************************/
27
28#include "vmwgfx_kms.h"
29#include "device_include/svga3d_surfacedefs.h"
30#include <drm/drm_plane_helper.h>
31
32#define vmw_crtc_to_stdu(x) \
33 container_of(x, struct vmw_screen_target_display_unit, base.crtc)
34#define vmw_encoder_to_stdu(x) \
35 container_of(x, struct vmw_screen_target_display_unit, base.encoder)
36#define vmw_connector_to_stdu(x) \
37 container_of(x, struct vmw_screen_target_display_unit, base.connector)
38
39
40
41enum stdu_content_type {
42 SAME_AS_DISPLAY = 0,
43 SEPARATE_SURFACE,
44 SEPARATE_DMA
45};
46
47/**
48 * struct vmw_stdu_dirty - closure structure for the update functions
49 *
50 * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
51 * @transfer: Transfer direction for DMA command.
52 * @left: Left side of bounding box.
53 * @right: Right side of bounding box.
54 * @top: Top side of bounding box.
55 * @bottom: Bottom side of bounding box.
 * @pitch: Pitch of the buffer when DMA-ing between a buffer and screen targets.
56 * @buf: DMA buffer when DMA-ing between buffer and screen targets.
57 * @sid: Surface ID when copying between surface and screen targets.
58 */
59struct vmw_stdu_dirty {
60 struct vmw_kms_dirty base;
61 SVGA3dTransferType transfer;
62 s32 left, right, top, bottom;
63 u32 pitch;
64 union {
65 struct vmw_dma_buffer *buf;
66 u32 sid;
67 };
68};
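
struct vmw_stdu_dirty is a closure: the generic dirty helper sees only the embedded base, and the callbacks recover the full structure with container_of(), the same pattern the vmw_*_to_stdu() macros use above. A self-contained illustration with stand-in types:

#include <assert.h>
#include <stddef.h>

/* Userspace equivalent of the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int unit; };

struct dirty {
	struct base base;          /* recoverable from callbacks */
	int left, right, top, bottom;
};

static void callback(struct base *b)
{
	struct dirty *d = container_of(b, struct dirty, base);

	d->left = d->top = 0;      /* the closure carries extra state */
}

int main(void)
{
	struct dirty d = { { 3 }, 10, 20, 30, 40 };

	callback(&d.base);
	assert(d.left == 0 && d.top == 0 && d.right == 20);
	return 0;
}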
69
70/*
71 * SVGA commands that are used by this code. Please see the device headers
72 * for explanation.
73 */
74struct vmw_stdu_update {
75 SVGA3dCmdHeader header;
76 SVGA3dCmdUpdateGBScreenTarget body;
77};
78
79struct vmw_stdu_dma {
80 SVGA3dCmdHeader header;
81 SVGA3dCmdSurfaceDMA body;
82};
83
84struct vmw_stdu_surface_copy {
85 SVGA3dCmdHeader header;
86 SVGA3dCmdSurfaceCopy body;
87};
88
89
90/**
91 * struct vmw_screen_target_display_unit
92 *
93 * @base: VMW specific DU structure
94 * @display_srf: surface to be displayed. The dimension of this will always
95 * match the display mode. If the display mode matches
96 * content_vfbs dimensions, then this is a pointer into the
97 * corresponding field in content_vfbs. If not, then this
98 * is a separate buffer to which content_vfbs will blit.
99 * @content_fb: holds the rendered content, can be a surface or DMA buffer
100 * @content_fb_type: content_fb type
101 * @defined: true if the current display unit has been initialized
102 */
103struct vmw_screen_target_display_unit {
104 struct vmw_display_unit base;
105
106 struct vmw_surface *display_srf;
107 struct drm_framebuffer *content_fb;
108
109 enum stdu_content_type content_fb_type;
110
111 bool defined;
112};
113
114
115
116static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu);
117
118
119
120/******************************************************************************
121 * Screen Target Display Unit helper Functions
122 *****************************************************************************/
123
124/**
125 * vmw_stdu_pin_display - pins the resource associated with the display surface
126 *
127 * @stdu: contains the display surface
128 *
129 * Since the display surface can either be a private surface allocated by us,
130 * or it can point to the content surface, we use this function to avoid
131 * pinning the same resource twice.
132 */
133static int vmw_stdu_pin_display(struct vmw_screen_target_display_unit *stdu)
134{
135 return vmw_resource_pin(&stdu->display_srf->res, false);
136}
137
138
139
140/**
141 * vmw_stdu_unpin_display - unpins the resource associated with display surface
142 *
143 * @stdu: contains the display surface
144 *
145 * If the display surface was privately allocated by
146 * vmw_surface_gb_priv_define() and not registered as a framebuffer, then it
147 * won't be automatically cleaned up when all the framebuffers are freed. As
148 * such, we have to explicitly call vmw_resource_unreference() to get it freed.
149 */
150static void vmw_stdu_unpin_display(struct vmw_screen_target_display_unit *stdu)
151{
152 if (stdu->display_srf) {
153 struct vmw_resource *res = &stdu->display_srf->res;
154
155 vmw_resource_unpin(res);
156
157 if (stdu->content_fb_type != SAME_AS_DISPLAY) {
158 vmw_resource_unreference(&res);
159 stdu->content_fb_type = SAME_AS_DISPLAY;
160 }
161
162 stdu->display_srf = NULL;
163 }
164}
165
166
167
168/******************************************************************************
169 * Screen Target Display Unit CRTC Functions
170 *****************************************************************************/
171
172
173/**
174 * vmw_stdu_crtc_destroy - cleans up the STDU
175 *
176 * @crtc: used to get a reference to the containing STDU
177 */
178static void vmw_stdu_crtc_destroy(struct drm_crtc *crtc)
179{
180 vmw_stdu_destroy(vmw_crtc_to_stdu(crtc));
181}
182
183/**
184 * vmw_stdu_define_st - Defines a Screen Target
185 *
186 * @dev_priv: VMW DRM device
187 * @stdu: display unit to create a Screen Target for
188 *
189 * Creates a Screen Target that we can use later. This function is called whenever the
190 * framebuffer size changes.
191 *
192 * RETURNS:
193 * 0 on success, error code on failure
194 */
195static int vmw_stdu_define_st(struct vmw_private *dev_priv,
196 struct vmw_screen_target_display_unit *stdu)
197{
198 struct {
199 SVGA3dCmdHeader header;
200 SVGA3dCmdDefineGBScreenTarget body;
201 } *cmd;
202
203 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
204
205 if (unlikely(cmd == NULL)) {
206 DRM_ERROR("Out of FIFO space defining Screen Target\n");
207 return -ENOMEM;
208 }
209
210 cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SCREENTARGET;
211 cmd->header.size = sizeof(cmd->body);
212
213 cmd->body.stid = stdu->base.unit;
214 cmd->body.width = stdu->display_srf->base_size.width;
215 cmd->body.height = stdu->display_srf->base_size.height;
216 cmd->body.flags = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
217 cmd->body.dpi = 0;
218 cmd->body.xRoot = stdu->base.crtc.x;
219 cmd->body.yRoot = stdu->base.crtc.y;
220
221 if (!stdu->base.is_implicit) {
222 cmd->body.xRoot = stdu->base.gui_x;
223 cmd->body.yRoot = stdu->base.gui_y;
224 }
225
226 vmw_fifo_commit(dev_priv, sizeof(*cmd));
227
228 stdu->defined = true;
229
230 return 0;
231}
232
233
234
235/**
236 * vmw_stdu_bind_st - Binds a surface to a Screen Target
237 *
238 * @dev_priv: VMW DRM device
239 * @stdu: display unit affected
240 * @res: Buffer to bind to the screen target. Set to NULL to blank screen.
241 *
242 * Binding a surface to a Screen Target is the same as flipping.
243 */
244static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
245 struct vmw_screen_target_display_unit *stdu,
246 struct vmw_resource *res)
247{
248 SVGA3dSurfaceImageId image;
249
250 struct {
251 SVGA3dCmdHeader header;
252 SVGA3dCmdBindGBScreenTarget body;
253 } *cmd;
254
255
256 if (!stdu->defined) {
257 DRM_ERROR("No screen target defined\n");
258 return -EINVAL;
259 }
260
261 /* Set up image using information in vfb */
262 memset(&image, 0, sizeof(image));
263 image.sid = res ? res->id : SVGA3D_INVALID_ID;
264
265 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
266
267 if (unlikely(cmd == NULL)) {
268 DRM_ERROR("Out of FIFO space binding a screen target\n");
269 return -ENOMEM;
270 }
271
272 cmd->header.id = SVGA_3D_CMD_BIND_GB_SCREENTARGET;
273 cmd->header.size = sizeof(cmd->body);
274
275 cmd->body.stid = stdu->base.unit;
276 cmd->body.image = image;
277
278 vmw_fifo_commit(dev_priv, sizeof(*cmd));
279
280 return 0;
281}
282
283/**
284 * vmw_stdu_populate_update - populate an UPDATE_GB_SCREENTARGET command with a
285 * bounding box.
286 *
287 * @cmd: Pointer to command stream.
288 * @unit: Screen target unit.
289 * @left: Left side of bounding box.
290 * @right: Right side of bounding box.
291 * @top: Top side of bounding box.
292 * @bottom: Bottom side of bounding box.
293 */
294static void vmw_stdu_populate_update(void *cmd, int unit,
295 s32 left, s32 right, s32 top, s32 bottom)
296{
297 struct vmw_stdu_update *update = cmd;
298
299 update->header.id = SVGA_3D_CMD_UPDATE_GB_SCREENTARGET;
300 update->header.size = sizeof(update->body);
301
302 update->body.stid = unit;
303 update->body.rect.x = left;
304 update->body.rect.y = top;
305 update->body.rect.w = right - left;
306 update->body.rect.h = bottom - top;
307}
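
The dirty tracking above works with left/right/top/bottom edges while the update command wants an x/y/w/h rectangle; vmw_stdu_populate_update() converts between the two. A minimal sketch of that conversion:

#include <assert.h>

struct rect { int x, y, w, h; };

/* Same arithmetic as vmw_stdu_populate_update(): edges in, rect out. */
static struct rect box_to_rect(int left, int right, int top, int bottom)
{
	struct rect r = { left, top, right - left, bottom - top };
	return r;
}

int main(void)
{
	struct rect r = box_to_rect(10, 110, 20, 60);

	assert(r.x == 10 && r.y == 20 && r.w == 100 && r.h == 40);
	return 0;
}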
308
309/**
310 * vmw_stdu_update_st - Full update of a Screen Target
311 *
312 * @dev_priv: VMW DRM device
313 * @stdu: display unit affected
314 *
315 * This function needs to be called whenever the content of a screen
316 * target has changed completely. Typically as a result of a backing
317 * surface change.
318 *
319 * RETURNS:
320 * 0 on success, error code on failure
321 */
322static int vmw_stdu_update_st(struct vmw_private *dev_priv,
323 struct vmw_screen_target_display_unit *stdu)
324{
325 struct vmw_stdu_update *cmd;
326 struct drm_crtc *crtc = &stdu->base.crtc;
327
328 if (!stdu->defined) {
329 DRM_ERROR("No screen target defined\n");
330 return -EINVAL;
331 }
332
333 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
334
335 if (unlikely(cmd == NULL)) {
336 DRM_ERROR("Out of FIFO space updating a Screen Target\n");
337 return -ENOMEM;
338 }
339
340 vmw_stdu_populate_update(cmd, stdu->base.unit, 0, crtc->mode.hdisplay,
341 0, crtc->mode.vdisplay);
342
343 vmw_fifo_commit(dev_priv, sizeof(*cmd));
344
345 return 0;
346}
347
348
349
350/**
351 * vmw_stdu_destroy_st - Destroy a Screen Target
352 *
353 * @dev_priv: VMW DRM device
354 * @stdu: display unit to destroy
355 */
356static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
357 struct vmw_screen_target_display_unit *stdu)
358{
359 int ret;
360
361 struct {
362 SVGA3dCmdHeader header;
363 SVGA3dCmdDestroyGBScreenTarget body;
364 } *cmd;
365
366
367 /* Nothing to do if not successfully defined */
368 if (unlikely(!stdu->defined))
369 return 0;
370
371 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
372
373 if (unlikely(cmd == NULL)) {
374 DRM_ERROR("Out of FIFO space, screen target not destroyed\n");
375 return -ENOMEM;
376 }
377
378 cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SCREENTARGET;
379 cmd->header.size = sizeof(cmd->body);
380
381 cmd->body.stid = stdu->base.unit;
382
383 vmw_fifo_commit(dev_priv, sizeof(*cmd));
384
385 /* Force sync */
386 ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
387 if (unlikely(ret != 0))
388 DRM_ERROR("Failed to sync with HW\n");
389
390 stdu->defined = false;
391
392 return ret;
393}
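
Taken together, the helpers above enforce an ordering: a screen target must be defined before it can be bound or updated, binding an invalid surface id blanks it, and destroying it returns the unit to the undefined state. A hypothetical model of that lifecycle with the FIFO commands omitted:

#include <assert.h>
#include <stdbool.h>

#define INVALID_ID 0xffffffffu

struct st { bool defined; unsigned bound_sid; };

static int define_st(struct st *st)  { st->defined = true; return 0; }
static int destroy_st(struct st *st) { st->defined = false; return 0; }

static int bind_st(struct st *st, unsigned sid)
{
	if (!st->defined)
		return -1;         /* -EINVAL in the driver */
	st->bound_sid = sid;       /* flipping is just rebinding */
	return 0;
}

int main(void)
{
	struct st st = { false, INVALID_ID };

	assert(bind_st(&st, 5) < 0);            /* must define first */
	define_st(&st);
	assert(bind_st(&st, 5) == 0);           /* show a surface */
	assert(bind_st(&st, INVALID_ID) == 0);  /* blank the screen */
	destroy_st(&st);
	assert(bind_st(&st, 5) < 0);            /* undefined again */
	return 0;
}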
394
395
396
397/**
398 * vmw_stdu_crtc_set_config - Sets a mode
399 *
400 * @set: mode parameters
401 *
402 * This function is the device-specific portion of the DRM CRTC mode set.
403 * For the SVGA device, we do this by defining a Screen Target, binding a
404 * GB Surface to that target, and finally updating the screen target.
405 *
406 * RETURNS:
407 * 0 on success, error code otherwise
408 */
409static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
410{
411 struct vmw_private *dev_priv;
412 struct vmw_screen_target_display_unit *stdu;
413 struct vmw_framebuffer *vfb;
414 struct vmw_framebuffer_surface *new_vfbs;
415 struct drm_display_mode *mode;
416 struct drm_framebuffer *new_fb;
417 struct drm_crtc *crtc;
418 struct drm_encoder *encoder;
419 struct drm_connector *connector;
420 int ret;
421
422
423 if (!set || !set->crtc)
424 return -EINVAL;
425
426 crtc = set->crtc;
427 crtc->x = set->x;
428 crtc->y = set->y;
429 stdu = vmw_crtc_to_stdu(crtc);
430 mode = set->mode;
431 new_fb = set->fb;
432 dev_priv = vmw_priv(crtc->dev);
433
434
435 if (set->num_connectors > 1) {
436 DRM_ERROR("Too many connectors\n");
437 return -EINVAL;
438 }
439
440 if (set->num_connectors == 1 &&
441 set->connectors[0] != &stdu->base.connector) {
442 DRM_ERROR("Connectors don't match %p %p\n",
443 set->connectors[0], &stdu->base.connector);
444 return -EINVAL;
445 }
446
447
448 /* Since they always map one to one these are safe */
449 connector = &stdu->base.connector;
450 encoder = &stdu->base.encoder;
451
452
453 /*
454 * After this point the CRTC will be considered off unless a new fb
455 * is bound
456 */
457 if (stdu->defined) {
458 /* Unbind current surface by binding an invalid one */
459 ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
460 if (unlikely(ret != 0))
461 return ret;
462
463 /* Update Screen Target, display will now be blank */
464 if (crtc->primary->fb) {
465			ret = vmw_stdu_update_st(dev_priv, stdu);
466 if (unlikely(ret != 0))
467 return ret;
468 }
469
470 crtc->primary->fb = NULL;
471 crtc->enabled = false;
472 encoder->crtc = NULL;
473 connector->encoder = NULL;
474
475 vmw_stdu_unpin_display(stdu);
476 stdu->content_fb = NULL;
477 stdu->content_fb_type = SAME_AS_DISPLAY;
478
479 ret = vmw_stdu_destroy_st(dev_priv, stdu);
480 /* The hardware is hung, give up */
481 if (unlikely(ret != 0))
482 return ret;
483 }
484
485
486 /* Any of these conditions means the caller wants CRTC off */
487 if (set->num_connectors == 0 || !mode || !new_fb)
488 return 0;
489
490
491 if (set->x + mode->hdisplay > new_fb->width ||
492 set->y + mode->vdisplay > new_fb->height) {
493 DRM_ERROR("Set outside of framebuffer\n");
494 return -EINVAL;
495 }
496
497 stdu->content_fb = new_fb;
498 vfb = vmw_framebuffer_to_vfb(stdu->content_fb);
499
500 if (vfb->dmabuf)
501 stdu->content_fb_type = SEPARATE_DMA;
502
503 /*
504	 * If the requested mode differs from the width and height
505 * of the FB or if the content buffer is a DMA buf, then allocate
506 * a display FB that matches the dimension of the mode
507 */
508 if (mode->hdisplay != new_fb->width ||
509 mode->vdisplay != new_fb->height ||
510 stdu->content_fb_type != SAME_AS_DISPLAY) {
511 struct vmw_surface content_srf;
512 struct drm_vmw_size display_base_size = {0};
513 struct vmw_surface *display_srf;
514
515
516 display_base_size.width = mode->hdisplay;
517 display_base_size.height = mode->vdisplay;
518 display_base_size.depth = 1;
519
520 /*
521		 * If the content buffer is a DMA buf, then we have to construct
522 * surface info
523 */
524 if (stdu->content_fb_type == SEPARATE_DMA) {
525
526 switch (new_fb->bits_per_pixel) {
527 case 32:
528 content_srf.format = SVGA3D_X8R8G8B8;
529 break;
530
531 case 16:
532 content_srf.format = SVGA3D_R5G6B5;
533 break;
534
535 case 8:
536 content_srf.format = SVGA3D_P8;
537 break;
538
539 default:
540 DRM_ERROR("Invalid format\n");
541 ret = -EINVAL;
542 goto err_unref_content;
543 }
544
545 content_srf.flags = 0;
546 content_srf.mip_levels[0] = 1;
547 content_srf.multisample_count = 0;
548 } else {
549
550 stdu->content_fb_type = SEPARATE_SURFACE;
551
552 new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
553 content_srf = *new_vfbs->surface;
554 }
555
556
557 ret = vmw_surface_gb_priv_define(crtc->dev,
558 0, /* because kernel visible only */
559 content_srf.flags,
560 content_srf.format,
561 true, /* a scanout buffer */
562 content_srf.mip_levels[0],
563 content_srf.multisample_count,
564 0,
565 display_base_size,
566 &display_srf);
567 if (unlikely(ret != 0)) {
568 DRM_ERROR("Cannot allocate a display FB.\n");
569 goto err_unref_content;
570 }
571
572 stdu->display_srf = display_srf;
573 } else {
574 new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
575 stdu->display_srf = new_vfbs->surface;
576 }
577
578
579 ret = vmw_stdu_pin_display(stdu);
580 if (unlikely(ret != 0)) {
581 stdu->display_srf = NULL;
582 goto err_unref_content;
583 }
584
585 vmw_svga_enable(dev_priv);
586
587 /*
588	 * Steps to displaying a surface, assuming the surface is already
589 * bound:
590 * 1. define a screen target
591 * 2. bind a fb to the screen target
592 * 3. update that screen target (this is done later by
593 * vmw_kms_stdu_do_surface_dirty_or_present)
594 */
595 ret = vmw_stdu_define_st(dev_priv, stdu);
596 if (unlikely(ret != 0))
597 goto err_unpin_display_and_content;
598
599 ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
600 if (unlikely(ret != 0))
601 goto err_unpin_destroy_st;
602
603
604 connector->encoder = encoder;
605 encoder->crtc = crtc;
606
607 crtc->mode = *mode;
608 crtc->primary->fb = new_fb;
609 crtc->enabled = true;
610
611 return ret;
612
613err_unpin_destroy_st:
614 vmw_stdu_destroy_st(dev_priv, stdu);
615err_unpin_display_and_content:
616 vmw_stdu_unpin_display(stdu);
617err_unref_content:
618 stdu->content_fb = NULL;
619 return ret;
620}
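
/*
 * Hedged illustration (hypothetical helper, editor addition): stripped of
 * connector/encoder bookkeeping, the bring-up performed by
 * vmw_stdu_crtc_set_config() above reduces to the define/bind/update
 * sequence below, with the same error unwinding as the labels in that
 * function.
 */
static int vmw_stdu_bring_up_sketch(struct vmw_private *dev_priv,
				    struct vmw_screen_target_display_unit *stdu)
{
	int ret;

	ret = vmw_stdu_define_st(dev_priv, stdu);	/* 1. define target */
	if (unlikely(ret != 0))
		return ret;

	/* 2. bind the pinned display surface to the target */
	ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
	if (unlikely(ret != 0)) {
		vmw_stdu_destroy_st(dev_priv, stdu);
		return ret;
	}

	/* 3. update the target so the surface contents become visible */
	return vmw_stdu_update_st(dev_priv, stdu);
}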
621
622
623
624/**
625 * vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
626 *
627 * @crtc: CRTC to attach FB to
628 * @new_fb: FB to attach
629 * @event: Event to be posted. This event should've been allocated
630 * using k[mz]alloc, and should've been completely initialized.
631 * @flags: Input flags.
632 *
633 * If the STDU uses the same display and content buffers, i.e. a true flip,
634 * this function will replace the existing display buffer with the new content
635 * buffer.
636 *
637 * If the STDU uses different display and content buffers, i.e. a blit, then
638 * only the content buffer will be updated.
639 *
640 * RETURNS:
641 * 0 on success, error code on failure
642 */
643static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
644 struct drm_framebuffer *new_fb,
645 struct drm_pending_vblank_event *event,
646 uint32_t flags)
647
648{
649	struct vmw_private *dev_priv;
650 struct vmw_screen_target_display_unit *stdu;
651 int ret;
652
653 if (crtc == NULL)
654 return -EINVAL;
655
656 dev_priv = vmw_priv(crtc->dev);
657 stdu = vmw_crtc_to_stdu(crtc);
658 crtc->primary->fb = new_fb;
659 stdu->content_fb = new_fb;
660
661 if (stdu->display_srf) {
662 /*
663 * If the display surface is the same as the content surface
664 * then remove the reference
665 */
666 if (stdu->content_fb_type == SAME_AS_DISPLAY) {
667 if (stdu->defined) {
668 /* Unbind the current surface */
669 ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
670 if (unlikely(ret != 0))
671 goto err_out;
672 }
673 vmw_stdu_unpin_display(stdu);
674 stdu->display_srf = NULL;
675 }
676 }
677
678
679 if (!new_fb) {
680 /* Blanks the display */
681 (void) vmw_stdu_update_st(dev_priv, stdu);
682
683 return 0;
684 }
685
686
687 if (stdu->content_fb_type == SAME_AS_DISPLAY) {
688 stdu->display_srf = vmw_framebuffer_to_vfbs(new_fb)->surface;
689 ret = vmw_stdu_pin_display(stdu);
690 if (ret) {
691 stdu->display_srf = NULL;
692 goto err_out;
693 }
694
695 /* Bind display surface */
696 ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
697 if (unlikely(ret != 0))
698 goto err_unpin_display_and_content;
699 }
700
701 /* Update display surface: after this point everything is bound */
702 ret = vmw_stdu_update_st(dev_priv, stdu);
703 if (unlikely(ret != 0))
704 return ret;
705
706 if (event) {
707 struct vmw_fence_obj *fence = NULL;
708 struct drm_file *file_priv = event->base.file_priv;
709
710 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
711 if (!fence)
712 return -ENOMEM;
713
714 ret = vmw_event_fence_action_queue(file_priv, fence,
715 &event->base,
716 &event->event.tv_sec,
717 &event->event.tv_usec,
718 true);
719 vmw_fence_obj_unreference(&fence);
720 }
721
722 return ret;
723
724err_unpin_display_and_content:
725 vmw_stdu_unpin_display(stdu);
726err_out:
727 crtc->primary->fb = NULL;
728 stdu->content_fb = NULL;
729 return ret;
730}
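
/*
 * Hedged usage sketch (hypothetical caller, editor addition): because the
 * page-flip path above treats a NULL framebuffer as "unbind and update",
 * blanking a CRTC can be expressed as a flip to nothing, with no
 * completion event requested.
 */
static int vmw_stdu_blank_sketch(struct drm_crtc *crtc)
{
	return vmw_stdu_crtc_page_flip(crtc, NULL, NULL, 0);
}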
731
732
733/**
734 * vmw_stdu_dmabuf_clip - Callback to encode a surface DMA command cliprect
735 *
736 * @dirty: The closure structure.
737 *
738 * Encodes a surface DMA command cliprect and updates the bounding box
739 * for the DMA.
740 */
741static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
742{
743 struct vmw_stdu_dirty *ddirty =
744 container_of(dirty, struct vmw_stdu_dirty, base);
745 struct vmw_stdu_dma *cmd = dirty->cmd;
746 struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
747
748 blit += dirty->num_hits;
749 blit->srcx = dirty->fb_x;
750 blit->srcy = dirty->fb_y;
751 blit->x = dirty->unit_x1;
752 blit->y = dirty->unit_y1;
753 blit->d = 1;
754 blit->w = dirty->unit_x2 - dirty->unit_x1;
755 blit->h = dirty->unit_y2 - dirty->unit_y1;
756 dirty->num_hits++;
757
758 if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM)
759 return;
760
761 /* Destination bounding box */
762 ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
763 ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
764 ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
765 ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
766}
767
768/**
769 * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command.
770 *
771 * @dirty: The closure structure.
772 *
773 * Fills in the missing fields in a DMA command, and optionally encodes
774 * a screen target update command, depending on transfer direction.
775 */
776static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
777{
778 struct vmw_stdu_dirty *ddirty =
779 container_of(dirty, struct vmw_stdu_dirty, base);
780 struct vmw_screen_target_display_unit *stdu =
781 container_of(dirty->unit, typeof(*stdu), base);
782 struct vmw_stdu_dma *cmd = dirty->cmd;
783 struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
784 SVGA3dCmdSurfaceDMASuffix *suffix =
785 (SVGA3dCmdSurfaceDMASuffix *) &blit[dirty->num_hits];
786 size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
787
788 if (!dirty->num_hits) {
789 vmw_fifo_commit(dirty->dev_priv, 0);
790 return;
791 }
792
793 cmd->header.id = SVGA_3D_CMD_SURFACE_DMA;
794 cmd->header.size = sizeof(cmd->body) + blit_size;
795 vmw_bo_get_guest_ptr(&ddirty->buf->base, &cmd->body.guest.ptr);
796 cmd->body.guest.pitch = ddirty->pitch;
797 cmd->body.host.sid = stdu->display_srf->res.id;
798 cmd->body.host.face = 0;
799 cmd->body.host.mipmap = 0;
800 cmd->body.transfer = ddirty->transfer;
801 suffix->suffixSize = sizeof(*suffix);
802 suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
803
804 if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
805 blit_size += sizeof(struct vmw_stdu_update);
806
807 vmw_stdu_populate_update(&suffix[1], stdu->base.unit,
808 ddirty->left, ddirty->right,
809 ddirty->top, ddirty->bottom);
810 }
811
812 vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
813
814 ddirty->left = ddirty->top = S32_MAX;
815 ddirty->right = ddirty->bottom = S32_MIN;
816}
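
/*
 * Note on the S32_MAX/S32_MIN resets above (editor addition): the bounding
 * box starts out "empty" so that the first min/max fold in the clip
 * callback snaps it to the first cliprect. Hedged sketch of the idiom,
 * using a hypothetical type:
 */
struct vmw_stdu_bbox_sketch {
	s32 x1, y1, x2, y2;	/* reset to S32_MAX/S32_MAX/S32_MIN/S32_MIN */
};

static void vmw_stdu_bbox_add_sketch(struct vmw_stdu_bbox_sketch *box,
				     s32 x1, s32 y1, s32 x2, s32 y2)
{
	box->x1 = min_t(s32, box->x1, x1);	/* grows left/up */
	box->y1 = min_t(s32, box->y1, y1);
	box->x2 = max_t(s32, box->x2, x2);	/* grows right/down */
	box->y2 = max_t(s32, box->y2, y2);
}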
817
818/**
819 * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed
820 * framebuffer and the screen target system.
821 *
822 * @dev_priv: Pointer to the device private structure.
823 * @file_priv: Pointer to a struct drm_file identifying the caller. May be
824 * set to NULL, but then @user_fence_rep must also be set to NULL.
825 * @vfb: Pointer to the dma-buffer backed framebuffer.
826 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
827 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
828 * be NULL.
829 * @num_clips: Number of clip rects in @clips or @vclips.
830 * @increment: Increment to use when looping over @clips or @vclips.
831 * @to_surface: Whether to DMA to the screen target system as opposed to
832 * from the screen target system.
833 * @interruptible: Whether to perform waits interruptibly if possible.
834 *
835 * If DMA-ing to the screen target system, the function will also notify
836 * the screen target system that a bounding box of the cliprects has been
837 * updated.
838 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
839 * interrupted.
840 */
841int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
842 struct drm_file *file_priv,
843 struct vmw_framebuffer *vfb,
844 struct drm_vmw_fence_rep __user *user_fence_rep,
845 struct drm_clip_rect *clips,
846 struct drm_vmw_rect *vclips,
847 uint32_t num_clips,
848 int increment,
849 bool to_surface,
850 bool interruptible)
851{
852 struct vmw_dma_buffer *buf =
853 container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
854 struct vmw_stdu_dirty ddirty;
855 int ret;
856
857 ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
858 false);
859 if (ret)
860 return ret;
861
862 ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
863 SVGA3D_READ_HOST_VRAM;
864 ddirty.left = ddirty.top = S32_MAX;
865 ddirty.right = ddirty.bottom = S32_MIN;
866 ddirty.pitch = vfb->base.pitches[0];
867 ddirty.buf = buf;
868 ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
869 ddirty.base.clip = vmw_stdu_dmabuf_clip;
870 ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
871 num_clips * sizeof(SVGA3dCopyBox) +
872 sizeof(SVGA3dCmdSurfaceDMASuffix);
873 if (to_surface)
874 ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update);
875
876 ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
877 0, 0, num_clips, increment, &ddirty.base);
878 vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
879 user_fence_rep);
880
881 return ret;
882}
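
/*
 * Hedged usage sketch (hypothetical kernel-internal caller, editor
 * addition): flush one dirty rectangle of a dma-buf backed framebuffer to
 * the screen target system. A NULL file_priv/user_fence_rep pair is
 * allowed per the kernel-doc above.
 */
static int vmw_stdu_flush_rect_sketch(struct vmw_private *dev_priv,
				      struct vmw_framebuffer *vfb,
				      u16 x, u16 y, u16 w, u16 h)
{
	struct drm_clip_rect clip = {
		.x1 = x,     .y1 = y,
		.x2 = x + w, .y2 = y + h,
	};

	return vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, &clip, NULL,
				1 /* num_clips */, 1 /* increment */,
				true /* to_surface */, false);
}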
883
884/**
885 * vmw_kms_stdu_surface_clip - Callback to encode a surface copy command cliprect
886 *
887 * @dirty: The closure structure.
888 *
889 * Encodes a surface copy command cliprect and updates the bounding box
890 * for the copy.
891 */
892static void vmw_kms_stdu_surface_clip(struct vmw_kms_dirty *dirty)
893{
894 struct vmw_stdu_dirty *sdirty =
895 container_of(dirty, struct vmw_stdu_dirty, base);
896 struct vmw_stdu_surface_copy *cmd = dirty->cmd;
897 struct vmw_screen_target_display_unit *stdu =
898 container_of(dirty->unit, typeof(*stdu), base);
899
900 if (sdirty->sid != stdu->display_srf->res.id) {
901 struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
902
903 blit += dirty->num_hits;
904 blit->srcx = dirty->fb_x;
905 blit->srcy = dirty->fb_y;
906 blit->x = dirty->unit_x1;
907 blit->y = dirty->unit_y1;
908 blit->d = 1;
909 blit->w = dirty->unit_x2 - dirty->unit_x1;
910 blit->h = dirty->unit_y2 - dirty->unit_y1;
911 }
912
913 dirty->num_hits++;
914
915 /* Destination bounding box */
916 sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
917 sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
918 sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
919 sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
920}
921
922/**
923 * vmw_kms_stdu_surface_fifo_commit - Callback to fill in and submit a surface
924 * copy command.
925 *
926 * @dirty: The closure structure.
927 *
928 * Fills in the missing fields in a surface copy command, and encodes a screen
929 * target update command.
930 */
931static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
932{
933 struct vmw_stdu_dirty *sdirty =
934 container_of(dirty, struct vmw_stdu_dirty, base);
935 struct vmw_screen_target_display_unit *stdu =
936 container_of(dirty->unit, typeof(*stdu), base);
937 struct vmw_stdu_surface_copy *cmd = dirty->cmd;
938 struct vmw_stdu_update *update;
939 size_t blit_size = sizeof(SVGA3dCopyBox) * dirty->num_hits;
940 size_t commit_size;
941
942 if (!dirty->num_hits) {
943 vmw_fifo_commit(dirty->dev_priv, 0);
944 return;
945 }
946
947 if (sdirty->sid != stdu->display_srf->res.id) {
948 struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
949
950 cmd->header.id = SVGA_3D_CMD_SURFACE_COPY;
951 cmd->header.size = sizeof(cmd->body) + blit_size;
952 cmd->body.src.sid = sdirty->sid;
953 cmd->body.dest.sid = stdu->display_srf->res.id;
954 update = (struct vmw_stdu_update *) &blit[dirty->num_hits];
955 commit_size = sizeof(*cmd) + blit_size + sizeof(*update);
956 } else {
957 update = dirty->cmd;
958 commit_size = sizeof(*update);
959 }
960
961 vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left,
962 sdirty->right, sdirty->top, sdirty->bottom);
963
964 vmw_fifo_commit(dirty->dev_priv, commit_size);
965
966 sdirty->left = sdirty->top = S32_MAX;
967 sdirty->right = sdirty->bottom = S32_MIN;
968}
969
970/**
971 * vmw_kms_stdu_surface_dirty - Dirty part of a surface-backed framebuffer
972 *
973 * @dev_priv: Pointer to the device private structure.
974 * @framebuffer: Pointer to the surface-buffer backed framebuffer.
975 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
976 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
977 * be NULL.
978 * @srf: Pointer to surface to blit from. If NULL, the surface attached
979 * to @framebuffer will be used.
980 * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
981 * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
982 * @num_clips: Number of clip rects in @clips.
983 * @inc: Increment to use when looping over @clips.
984 * @out_fence: If non-NULL, will return a ref-counted pointer to a
985 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
986 * case the device has already synchronized.
987 *
988 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
989 * interrupted.
990 */
991int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
992 struct vmw_framebuffer *framebuffer,
993 struct drm_clip_rect *clips,
994 struct drm_vmw_rect *vclips,
995 struct vmw_resource *srf,
996 s32 dest_x,
997 s32 dest_y,
998 unsigned num_clips, int inc,
999 struct vmw_fence_obj **out_fence)
1000{
1001 struct vmw_framebuffer_surface *vfbs =
1002 container_of(framebuffer, typeof(*vfbs), base);
1003 struct vmw_stdu_dirty sdirty;
1004 int ret;
1005
1006 if (!srf)
1007 srf = &vfbs->surface->res;
1008
1009 ret = vmw_kms_helper_resource_prepare(srf, true);
1010 if (ret)
1011 return ret;
1012
1013 if (vfbs->is_dmabuf_proxy) {
1014 ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
1015 if (ret)
1016 goto out_finish;
1017 }
1018
1019 sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit;
1020 sdirty.base.clip = vmw_kms_stdu_surface_clip;
1021 sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
1022 sizeof(SVGA3dCopyBox) * num_clips +
1023 sizeof(struct vmw_stdu_update);
1024 sdirty.sid = srf->id;
1025 sdirty.left = sdirty.top = S32_MAX;
1026 sdirty.right = sdirty.bottom = S32_MIN;
1027
1028 ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
1029 dest_x, dest_y, num_clips, inc,
1030 &sdirty.base);
1031out_finish:
1032 vmw_kms_helper_resource_finish(srf, out_fence);
1033
1034 return ret;
1035}
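
/*
 * Hedged usage sketch (hypothetical caller, editor addition): dirty a
 * single rectangle of a surface-backed framebuffer. Passing srf == NULL
 * blits from the surface attached to the framebuffer, and the optional
 * out fence is discarded.
 */
static int vmw_stdu_surface_dirty_rect_sketch(struct vmw_private *dev_priv,
					      struct vmw_framebuffer *framebuffer,
					      struct drm_clip_rect *clip)
{
	return vmw_kms_stdu_surface_dirty(dev_priv, framebuffer, clip, NULL,
					  NULL /* srf */, 0, 0 /* dest x/y */,
					  1 /* num_clips */, 1 /* inc */,
					  NULL /* out_fence */);
}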
1036
1037
1038/*
1039 * Screen Target CRTC dispatch table
1040 */
1041static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
1042 .save = vmw_du_crtc_save,
1043 .restore = vmw_du_crtc_restore,
1044 .cursor_set = vmw_du_crtc_cursor_set,
1045 .cursor_move = vmw_du_crtc_cursor_move,
1046 .gamma_set = vmw_du_crtc_gamma_set,
1047 .destroy = vmw_stdu_crtc_destroy,
1048 .set_config = vmw_stdu_crtc_set_config,
1049 .page_flip = vmw_stdu_crtc_page_flip,
1050};
1051
1052
1053
1054/******************************************************************************
1055 * Screen Target Display Unit Encoder Functions
1056 *****************************************************************************/
1057
1058/**
1059 * vmw_stdu_encoder_destroy - cleans up the STDU
1060 *
1061 * @encoder: used to get the containing STDU
1062 *
1063 * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
1064 * this can be a no-op. Nevertheless, it doesn't hurt to have this in case
1065 * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
1066 * get called.
1067 */
1068static void vmw_stdu_encoder_destroy(struct drm_encoder *encoder)
1069{
1070 vmw_stdu_destroy(vmw_encoder_to_stdu(encoder));
1071}
1072
1073static struct drm_encoder_funcs vmw_stdu_encoder_funcs = {
1074 .destroy = vmw_stdu_encoder_destroy,
1075};
1076
1077
1078
1079/******************************************************************************
1080 * Screen Target Display Unit Connector Functions
1081 *****************************************************************************/
1082
1083/**
1084 * vmw_stdu_connector_destroy - cleans up the STDU
1085 *
1086 * @connector: used to get the containing STDU
1087 *
1088 * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
1089 * this can be a no-op. Nevertheless, it doesn't hurt to have this in case
1090 * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
1091 * get called.
1092 */
1093static void vmw_stdu_connector_destroy(struct drm_connector *connector)
1094{
1095 vmw_stdu_destroy(vmw_connector_to_stdu(connector));
1096}
1097
1098
1099
1100static struct drm_connector_funcs vmw_stdu_connector_funcs = {
1101 .dpms = vmw_du_connector_dpms,
1102 .save = vmw_du_connector_save,
1103 .restore = vmw_du_connector_restore,
1104 .detect = vmw_du_connector_detect,
1105 .fill_modes = vmw_du_connector_fill_modes,
1106 .set_property = vmw_du_connector_set_property,
1107 .destroy = vmw_stdu_connector_destroy,
1108};
1109
1110
1111
1112/**
1113 * vmw_stdu_init - Sets up a Screen Target Display Unit
1114 *
1115 * @dev_priv: VMW DRM device
1116 * @unit: unit number, in the range 0 to VMWGFX_NUM_DISPLAY_UNITS - 1
1117 *
1118 * This function is called once per CRTC, and allocates one Screen Target
1119 * display unit to represent that CRTC. Since the SVGA device does not separate
1120 * out encoder and connector, they are represented as part of the STDU as well.
1121 */
1122static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
1123{
1124 struct vmw_screen_target_display_unit *stdu;
1125 struct drm_device *dev = dev_priv->dev;
1126 struct drm_connector *connector;
1127 struct drm_encoder *encoder;
1128 struct drm_crtc *crtc;
1129
1130
1131 stdu = kzalloc(sizeof(*stdu), GFP_KERNEL);
1132 if (!stdu)
1133 return -ENOMEM;
1134
1135 stdu->base.unit = unit;
1136 crtc = &stdu->base.crtc;
1137 encoder = &stdu->base.encoder;
1138 connector = &stdu->base.connector;
1139
1140 stdu->base.pref_active = (unit == 0);
1141 stdu->base.pref_width = dev_priv->initial_width;
1142 stdu->base.pref_height = dev_priv->initial_height;
1143 stdu->base.is_implicit = true;
1144
1145 drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
1146 DRM_MODE_CONNECTOR_VIRTUAL);
1147 connector->status = vmw_du_connector_detect(connector, false);
1148
1149 drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
1150 DRM_MODE_ENCODER_VIRTUAL);
1151 drm_mode_connector_attach_encoder(connector, encoder);
1152 encoder->possible_crtcs = (1 << unit);
1153 encoder->possible_clones = 0;
1154
1155 (void) drm_connector_register(connector);
1156
1157 drm_crtc_init(dev, crtc, &vmw_stdu_crtc_funcs);
1158
1159 drm_mode_crtc_set_gamma_size(crtc, 256);
1160
1161 drm_object_attach_property(&connector->base,
1162 dev->mode_config.dirty_info_property,
1163 1);
1164
1165 return 0;
1166}
1167
1168
1169
1170/**
1171 * vmw_stdu_destroy - Cleans up a vmw_screen_target_display_unit
1172 *
1173 * @stdu: Screen Target Display Unit to be destroyed
1174 *
1175 * Clean up after vmw_stdu_init
1176 */
1177static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu)
1178{
1179 vmw_stdu_unpin_display(stdu);
1180
1181 vmw_du_cleanup(&stdu->base);
1182 kfree(stdu);
1183}
1184
1185
1186
1187/******************************************************************************
1188 * Screen Target Display KMS Functions
1189 *
1190 * These functions are called by the common KMS code in vmwgfx_kms.c
1191 *****************************************************************************/
1192
1193/**
1194 * vmw_kms_stdu_init_display - Initializes a Screen Target based display
1195 *
1196 * @dev_priv: VMW DRM device
1197 *
1198 * This function initializes a Screen Target based display device. It checks
1199 * the capability bits to make sure the underlying hardware can support
1200 * screen targets, and then creates the maximum number of CRTCs, a.k.a. Display
1201 * Units, as supported by the display hardware.
1202 *
1203 * RETURNS:
1204 * 0 on success, error code otherwise
1205 */
1206int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
1207{
1208 struct drm_device *dev = dev_priv->dev;
1209 int i, ret;
1210
1211
1212 /* Do nothing if Screen Target support is turned off */
1213 if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
1214 return -ENOSYS;
1215
1216 if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
1217 return -ENOSYS;
1218
1219 ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
1220 if (unlikely(ret != 0))
1221 return ret;
1222
1223 ret = drm_mode_create_dirty_info_property(dev);
1224 if (unlikely(ret != 0))
1225 goto err_vblank_cleanup;
1226
1227 dev_priv->active_display_unit = vmw_du_screen_target;
1228
1229 for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
1230 ret = vmw_stdu_init(dev_priv, i);
1231
1232 if (unlikely(ret != 0)) {
1233			DRM_ERROR("Failed to initialize STDU %d\n", i);
1234 goto err_vblank_cleanup;
1235 }
1236 }
1237
1238 DRM_INFO("Screen Target Display device initialized\n");
1239
1240 return 0;
1241
1242err_vblank_cleanup:
1243 drm_vblank_cleanup(dev);
1244 return ret;
1245}
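
/*
 * Hedged illustration (hypothetical caller, editor addition): the -ENOSYS
 * returns above are assumed to let the driver fall back to another
 * display-unit type when screen targets are unsupported; the fallback
 * used here is a placeholder.
 */
static int vmw_kms_pick_display_sketch(struct vmw_private *dev_priv)
{
	int ret = vmw_kms_stdu_init_display(dev_priv);

	if (ret == -ENOSYS)	/* no screen target support */
		ret = vmw_kms_sou_init_display(dev_priv); /* placeholder */

	return ret;
}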
1246
1247
1248
1249/**
1250 * vmw_kms_stdu_close_display - Cleans up after vmw_kms_stdu_init_display
1251 *
1252 * @dev_priv: VMW DRM device
1253 *
1254 * Frees up any resources allocated by vmw_kms_stdu_init_display
1255 *
1256 * RETURNS:
1257 * 0 on success
1258 */
1259int vmw_kms_stdu_close_display(struct vmw_private *dev_priv)
1260{
1261 struct drm_device *dev = dev_priv->dev;
1262
1263 drm_vblank_cleanup(dev);
1264
1265 return 0;
1266}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 4ecdbf3e59da..5b8595b78429 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,8 +27,11 @@
27 27
28#include "vmwgfx_drv.h" 28#include "vmwgfx_drv.h"
29#include "vmwgfx_resource_priv.h" 29#include "vmwgfx_resource_priv.h"
30#include "vmwgfx_so.h"
31#include "vmwgfx_binding.h"
30#include <ttm/ttm_placement.h> 32#include <ttm/ttm_placement.h>
31#include "svga3d_surfacedefs.h" 33#include "device_include/svga3d_surfacedefs.h"
34
32 35
33/** 36/**
34 * struct vmw_user_surface - User-space visible surface resource 37 * struct vmw_user_surface - User-space visible surface resource
@@ -36,7 +39,7 @@
36 * @base: The TTM base object handling user-space visibility. 39 * @base: The TTM base object handling user-space visibility.
37 * @srf: The surface metadata. 40 * @srf: The surface metadata.
38 * @size: TTM accounting size for the surface. 41 * @size: TTM accounting size for the surface.
39 * @master: master of the creating client. Used for security check. 42 * @master: master of the creating client. Used for security check.
40 */ 43 */
41struct vmw_user_surface { 44struct vmw_user_surface {
42 struct ttm_prime_object prime; 45 struct ttm_prime_object prime;
@@ -220,7 +223,7 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
220 cmd->header.size = cmd_len; 223 cmd->header.size = cmd_len;
221 cmd->body.sid = srf->res.id; 224 cmd->body.sid = srf->res.id;
222 cmd->body.surfaceFlags = srf->flags; 225 cmd->body.surfaceFlags = srf->flags;
223 cmd->body.format = cpu_to_le32(srf->format); 226 cmd->body.format = srf->format;
224 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) 227 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
225 cmd->body.face[i].numMipLevels = srf->mip_levels[i]; 228 cmd->body.face[i].numMipLevels = srf->mip_levels[i];
226 229
@@ -340,7 +343,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
340 dev_priv->used_memory_size -= res->backup_size; 343 dev_priv->used_memory_size -= res->backup_size;
341 mutex_unlock(&dev_priv->cmdbuf_mutex); 344 mutex_unlock(&dev_priv->cmdbuf_mutex);
342 } 345 }
343 vmw_3d_resource_dec(dev_priv, false); 346 vmw_fifo_resource_dec(dev_priv);
344} 347}
345 348
346/** 349/**
@@ -576,14 +579,14 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
576 579
577 BUG_ON(res_free == NULL); 580 BUG_ON(res_free == NULL);
578 if (!dev_priv->has_mob) 581 if (!dev_priv->has_mob)
579 (void) vmw_3d_resource_inc(dev_priv, false); 582 vmw_fifo_resource_inc(dev_priv);
580 ret = vmw_resource_init(dev_priv, res, true, res_free, 583 ret = vmw_resource_init(dev_priv, res, true, res_free,
581 (dev_priv->has_mob) ? &vmw_gb_surface_func : 584 (dev_priv->has_mob) ? &vmw_gb_surface_func :
582 &vmw_legacy_surface_func); 585 &vmw_legacy_surface_func);
583 586
584 if (unlikely(ret != 0)) { 587 if (unlikely(ret != 0)) {
585 if (!dev_priv->has_mob) 588 if (!dev_priv->has_mob)
586 vmw_3d_resource_dec(dev_priv, false); 589 vmw_fifo_resource_dec(dev_priv);
587 res_free(res); 590 res_free(res);
588 return ret; 591 return ret;
589 } 592 }
@@ -593,6 +596,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
593 * surface validate. 596 * surface validate.
594 */ 597 */
595 598
599 INIT_LIST_HEAD(&srf->view_list);
596 vmw_resource_activate(res, vmw_hw_surface_destroy); 600 vmw_resource_activate(res, vmw_hw_surface_destroy);
597 return ret; 601 return ret;
598} 602}
@@ -723,6 +727,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
723 desc = svga3dsurface_get_desc(req->format); 727 desc = svga3dsurface_get_desc(req->format);
724 if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { 728 if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
725 DRM_ERROR("Invalid surface format for surface creation.\n"); 729 DRM_ERROR("Invalid surface format for surface creation.\n");
730 DRM_ERROR("Format requested is: %d\n", req->format);
726 return -EINVAL; 731 return -EINVAL;
727 } 732 }
728 733
@@ -1018,17 +1023,21 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
1018{ 1023{
1019 struct vmw_private *dev_priv = res->dev_priv; 1024 struct vmw_private *dev_priv = res->dev_priv;
1020 struct vmw_surface *srf = vmw_res_to_srf(res); 1025 struct vmw_surface *srf = vmw_res_to_srf(res);
1021 uint32_t cmd_len, submit_len; 1026 uint32_t cmd_len, cmd_id, submit_len;
1022 int ret; 1027 int ret;
1023 struct { 1028 struct {
1024 SVGA3dCmdHeader header; 1029 SVGA3dCmdHeader header;
1025 SVGA3dCmdDefineGBSurface body; 1030 SVGA3dCmdDefineGBSurface body;
1026 } *cmd; 1031 } *cmd;
1032 struct {
1033 SVGA3dCmdHeader header;
1034 SVGA3dCmdDefineGBSurface_v2 body;
1035 } *cmd2;
1027 1036
1028 if (likely(res->id != -1)) 1037 if (likely(res->id != -1))
1029 return 0; 1038 return 0;
1030 1039
1031 (void) vmw_3d_resource_inc(dev_priv, false); 1040 vmw_fifo_resource_inc(dev_priv);
1032 ret = vmw_resource_alloc_id(res); 1041 ret = vmw_resource_alloc_id(res);
1033 if (unlikely(ret != 0)) { 1042 if (unlikely(ret != 0)) {
1034 DRM_ERROR("Failed to allocate a surface id.\n"); 1043 DRM_ERROR("Failed to allocate a surface id.\n");
@@ -1040,9 +1049,19 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
1040 goto out_no_fifo; 1049 goto out_no_fifo;
1041 } 1050 }
1042 1051
1043 cmd_len = sizeof(cmd->body); 1052 if (srf->array_size > 0) {
1044 submit_len = sizeof(*cmd); 1053 /* has_dx checked on creation time. */
1054 cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
1055 cmd_len = sizeof(cmd2->body);
1056 submit_len = sizeof(*cmd2);
1057 } else {
1058 cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
1059 cmd_len = sizeof(cmd->body);
1060 submit_len = sizeof(*cmd);
1061 }
1062
1045 cmd = vmw_fifo_reserve(dev_priv, submit_len); 1063 cmd = vmw_fifo_reserve(dev_priv, submit_len);
1064 cmd2 = (typeof(cmd2))cmd;
1046 if (unlikely(cmd == NULL)) { 1065 if (unlikely(cmd == NULL)) {
1047 DRM_ERROR("Failed reserving FIFO space for surface " 1066 DRM_ERROR("Failed reserving FIFO space for surface "
1048 "creation.\n"); 1067 "creation.\n");
@@ -1050,17 +1069,33 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
1050 goto out_no_fifo; 1069 goto out_no_fifo;
1051 } 1070 }
1052 1071
1053 cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE; 1072 if (srf->array_size > 0) {
1054 cmd->header.size = cmd_len; 1073 cmd2->header.id = cmd_id;
1055 cmd->body.sid = srf->res.id; 1074 cmd2->header.size = cmd_len;
1056 cmd->body.surfaceFlags = srf->flags; 1075 cmd2->body.sid = srf->res.id;
1057 cmd->body.format = cpu_to_le32(srf->format); 1076 cmd2->body.surfaceFlags = srf->flags;
1058 cmd->body.numMipLevels = srf->mip_levels[0]; 1077 cmd2->body.format = cpu_to_le32(srf->format);
1059 cmd->body.multisampleCount = srf->multisample_count; 1078 cmd2->body.numMipLevels = srf->mip_levels[0];
1060 cmd->body.autogenFilter = srf->autogen_filter; 1079 cmd2->body.multisampleCount = srf->multisample_count;
1061 cmd->body.size.width = srf->base_size.width; 1080 cmd2->body.autogenFilter = srf->autogen_filter;
1062 cmd->body.size.height = srf->base_size.height; 1081 cmd2->body.size.width = srf->base_size.width;
1063 cmd->body.size.depth = srf->base_size.depth; 1082 cmd2->body.size.height = srf->base_size.height;
1083 cmd2->body.size.depth = srf->base_size.depth;
1084 cmd2->body.arraySize = srf->array_size;
1085 } else {
1086 cmd->header.id = cmd_id;
1087 cmd->header.size = cmd_len;
1088 cmd->body.sid = srf->res.id;
1089 cmd->body.surfaceFlags = srf->flags;
1090 cmd->body.format = cpu_to_le32(srf->format);
1091 cmd->body.numMipLevels = srf->mip_levels[0];
1092 cmd->body.multisampleCount = srf->multisample_count;
1093 cmd->body.autogenFilter = srf->autogen_filter;
1094 cmd->body.size.width = srf->base_size.width;
1095 cmd->body.size.height = srf->base_size.height;
1096 cmd->body.size.depth = srf->base_size.depth;
1097 }
1098
1064 vmw_fifo_commit(dev_priv, submit_len); 1099 vmw_fifo_commit(dev_priv, submit_len);
1065 1100
1066 return 0; 1101 return 0;
@@ -1068,7 +1103,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
1068out_no_fifo: 1103out_no_fifo:
1069 vmw_resource_release_id(res); 1104 vmw_resource_release_id(res);
1070out_no_id: 1105out_no_id:
1071 vmw_3d_resource_dec(dev_priv, false); 1106 vmw_fifo_resource_dec(dev_priv);
1072 return ret; 1107 return ret;
1073} 1108}
1074 1109
@@ -1188,6 +1223,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
1188static int vmw_gb_surface_destroy(struct vmw_resource *res) 1223static int vmw_gb_surface_destroy(struct vmw_resource *res)
1189{ 1224{
1190 struct vmw_private *dev_priv = res->dev_priv; 1225 struct vmw_private *dev_priv = res->dev_priv;
1226 struct vmw_surface *srf = vmw_res_to_srf(res);
1191 struct { 1227 struct {
1192 SVGA3dCmdHeader header; 1228 SVGA3dCmdHeader header;
1193 SVGA3dCmdDestroyGBSurface body; 1229 SVGA3dCmdDestroyGBSurface body;
@@ -1197,7 +1233,8 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
1197 return 0; 1233 return 0;
1198 1234
1199 mutex_lock(&dev_priv->binding_mutex); 1235 mutex_lock(&dev_priv->binding_mutex);
1200 vmw_context_binding_res_list_scrub(&res->binding_head); 1236 vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
1237 vmw_binding_res_list_scrub(&res->binding_head);
1201 1238
1202 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 1239 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
1203 if (unlikely(cmd == NULL)) { 1240 if (unlikely(cmd == NULL)) {
@@ -1213,11 +1250,12 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
1213 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 1250 vmw_fifo_commit(dev_priv, sizeof(*cmd));
1214 mutex_unlock(&dev_priv->binding_mutex); 1251 mutex_unlock(&dev_priv->binding_mutex);
1215 vmw_resource_release_id(res); 1252 vmw_resource_release_id(res);
1216 vmw_3d_resource_dec(dev_priv, false); 1253 vmw_fifo_resource_dec(dev_priv);
1217 1254
1218 return 0; 1255 return 0;
1219} 1256}
1220 1257
1258
1221/** 1259/**
1222 * vmw_gb_surface_define_ioctl - Ioctl function implementing 1260 * vmw_gb_surface_define_ioctl - Ioctl function implementing
1223 * the user surface define functionality. 1261 * the user surface define functionality.
@@ -1241,77 +1279,51 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1241 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1279 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1242 int ret; 1280 int ret;
1243 uint32_t size; 1281 uint32_t size;
1244 const struct svga3d_surface_desc *desc;
1245 uint32_t backup_handle; 1282 uint32_t backup_handle;
1246 1283
1284
1247 if (unlikely(vmw_user_surface_size == 0)) 1285 if (unlikely(vmw_user_surface_size == 0))
1248 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + 1286 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
1249 128; 1287 128;
1250 1288
1251 size = vmw_user_surface_size + 128; 1289 size = vmw_user_surface_size + 128;
1252 1290
1253 desc = svga3dsurface_get_desc(req->format); 1291 /* Define a surface based on the parameters. */
1254 if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { 1292 ret = vmw_surface_gb_priv_define(dev,
1255 DRM_ERROR("Invalid surface format for surface creation.\n"); 1293 size,
1256 return -EINVAL; 1294 req->svga3d_flags,
1257 } 1295 req->format,
1258 1296 req->drm_surface_flags & drm_vmw_surface_flag_scanout,
1259 ret = ttm_read_lock(&dev_priv->reservation_sem, true); 1297 req->mip_levels,
1298 req->multisample_count,
1299 req->array_size,
1300 req->base_size,
1301 &srf);
1260 if (unlikely(ret != 0)) 1302 if (unlikely(ret != 0))
1261 return ret; 1303 return ret;
1262 1304
1263 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), 1305 user_srf = container_of(srf, struct vmw_user_surface, srf);
1264 size, false, true);
1265 if (unlikely(ret != 0)) {
1266 if (ret != -ERESTARTSYS)
1267 DRM_ERROR("Out of graphics memory for surface"
1268 " creation.\n");
1269 goto out_unlock;
1270 }
1271
1272 user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
1273 if (unlikely(user_srf == NULL)) {
1274 ret = -ENOMEM;
1275 goto out_no_user_srf;
1276 }
1277
1278 srf = &user_srf->srf;
1279 res = &srf->res;
1280
1281 srf->flags = req->svga3d_flags;
1282 srf->format = req->format;
1283 srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
1284 srf->mip_levels[0] = req->mip_levels;
1285 srf->num_sizes = 1;
1286 srf->sizes = NULL;
1287 srf->offsets = NULL;
1288 user_srf->size = size;
1289 srf->base_size = req->base_size;
1290 srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
1291 srf->multisample_count = req->multisample_count;
1292 res->backup_size = svga3dsurface_get_serialized_size
1293 (srf->format, srf->base_size, srf->mip_levels[0],
1294 srf->flags & SVGA3D_SURFACE_CUBEMAP);
1295
1296 user_srf->prime.base.shareable = false;
1297 user_srf->prime.base.tfile = NULL;
1298 if (drm_is_primary_client(file_priv)) 1306 if (drm_is_primary_client(file_priv))
1299 user_srf->master = drm_master_get(file_priv->master); 1307 user_srf->master = drm_master_get(file_priv->master);
1300 1308
1301 /** 1309 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1302 * From this point, the generic resource management functions
1303 * destroy the object on failure.
1304 */
1305
1306 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
1307 if (unlikely(ret != 0)) 1310 if (unlikely(ret != 0))
1308 goto out_unlock; 1311 return ret;
1312
1313 res = &user_srf->srf.res;
1314
1309 1315
1310 if (req->buffer_handle != SVGA3D_INVALID_ID) { 1316 if (req->buffer_handle != SVGA3D_INVALID_ID) {
1311 ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, 1317 ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
1312 &res->backup); 1318 &res->backup);
1313 } else if (req->drm_surface_flags & 1319 if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
1314 drm_vmw_surface_flag_create_buffer) 1320 res->backup_size) {
1321 DRM_ERROR("Surface backup buffer is too small.\n");
1322 vmw_dmabuf_unreference(&res->backup);
1323 ret = -EINVAL;
1324 goto out_unlock;
1325 }
1326 } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
1315 ret = vmw_user_dmabuf_alloc(dev_priv, tfile, 1327 ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
1316 res->backup_size, 1328 res->backup_size,
1317 req->drm_surface_flags & 1329 req->drm_surface_flags &
@@ -1324,7 +1336,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1324 goto out_unlock; 1336 goto out_unlock;
1325 } 1337 }
1326 1338
1327 tmp = vmw_resource_reference(&srf->res); 1339 tmp = vmw_resource_reference(res);
1328 ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, 1340 ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
1329 req->drm_surface_flags & 1341 req->drm_surface_flags &
1330 drm_vmw_surface_flag_shareable, 1342 drm_vmw_surface_flag_shareable,
@@ -1337,7 +1349,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1337 goto out_unlock; 1349 goto out_unlock;
1338 } 1350 }
1339 1351
1340 rep->handle = user_srf->prime.base.hash.key; 1352 rep->handle = user_srf->prime.base.hash.key;
1341 rep->backup_size = res->backup_size; 1353 rep->backup_size = res->backup_size;
1342 if (res->backup) { 1354 if (res->backup) {
1343 rep->buffer_map_handle = 1355 rep->buffer_map_handle =
@@ -1352,10 +1364,6 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1352 1364
1353 vmw_resource_unreference(&res); 1365 vmw_resource_unreference(&res);
1354 1366
1355 ttm_read_unlock(&dev_priv->reservation_sem);
1356 return 0;
1357out_no_user_srf:
1358 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
1359out_unlock: 1367out_unlock:
1360 ttm_read_unlock(&dev_priv->reservation_sem); 1368 ttm_read_unlock(&dev_priv->reservation_sem);
1361 return ret; 1369 return ret;
@@ -1415,6 +1423,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
1415 rep->creq.drm_surface_flags = 0; 1423 rep->creq.drm_surface_flags = 0;
1416 rep->creq.multisample_count = srf->multisample_count; 1424 rep->creq.multisample_count = srf->multisample_count;
1417 rep->creq.autogen_filter = srf->autogen_filter; 1425 rep->creq.autogen_filter = srf->autogen_filter;
1426 rep->creq.array_size = srf->array_size;
1418 rep->creq.buffer_handle = backup_handle; 1427 rep->creq.buffer_handle = backup_handle;
1419 rep->creq.base_size = srf->base_size; 1428 rep->creq.base_size = srf->base_size;
1420 rep->crep.handle = user_srf->prime.base.hash.key; 1429 rep->crep.handle = user_srf->prime.base.hash.key;
@@ -1429,3 +1438,137 @@ out_bad_resource:
1429 1438
1430 return ret; 1439 return ret;
1431} 1440}
1441
1442/**
1443 * vmw_surface_gb_priv_define - Define a private GB surface
1444 *
1445 * @dev: Pointer to a struct drm_device
1446 * @user_accounting_size: Used to track user-space memory usage, set
1447 * to 0 for kernel mode only memory
1448 * @svga3d_flags: SVGA3d surface flags for the device
1449 * @format: requested surface format
1450 * @for_scanout: true if intended to be used as a scanout buffer
1451 * @num_mip_levels: number of MIP levels
1452 * @multisample_count: multisample count for the surface
1453 * @array_size: Surface array size.
1454 * @size: width, height, depth of the surface requested
1455 * @srf_out: allocated vmw_surface. Set to NULL on failure.
1456 *
1457 * GB surfaces allocated by this function will not have a user mode handle, and
1458 * thus will only be visible to vmwgfx. For optimization reasons the
1459 * surface may later be given a user mode handle by another function to make
1460 * it available to user mode drivers.
1461 */
1462int vmw_surface_gb_priv_define(struct drm_device *dev,
1463 uint32_t user_accounting_size,
1464 uint32_t svga3d_flags,
1465 SVGA3dSurfaceFormat format,
1466 bool for_scanout,
1467 uint32_t num_mip_levels,
1468 uint32_t multisample_count,
1469 uint32_t array_size,
1470 struct drm_vmw_size size,
1471 struct vmw_surface **srf_out)
1472{
1473 struct vmw_private *dev_priv = vmw_priv(dev);
1474 struct vmw_user_surface *user_srf;
1475 struct vmw_surface *srf;
1476 int ret;
1477 u32 num_layers;
1478
1479 *srf_out = NULL;
1480
1481 if (for_scanout) {
1482 if (!svga3dsurface_is_screen_target_format(format)) {
1483			DRM_ERROR("Invalid Screen Target surface format.\n");
1484 return -EINVAL;
1485 }
1486 } else {
1487 const struct svga3d_surface_desc *desc;
1488
1489 desc = svga3dsurface_get_desc(format);
1490 if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
1491 DRM_ERROR("Invalid surface format.\n");
1492 return -EINVAL;
1493 }
1494 }
1495
1496	/* array_size must be zero for a non-GL3 host. */
1497 if (array_size > 0 && !dev_priv->has_dx) {
1498 DRM_ERROR("Tried to create DX surface on non-DX host.\n");
1499 return -EINVAL;
1500 }
1501
1502 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1503 if (unlikely(ret != 0))
1504 return ret;
1505
1506 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
1507 user_accounting_size, false, true);
1508 if (unlikely(ret != 0)) {
1509 if (ret != -ERESTARTSYS)
1510 DRM_ERROR("Out of graphics memory for surface"
1511 " creation.\n");
1512 goto out_unlock;
1513 }
1514
1515 user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
1516 if (unlikely(user_srf == NULL)) {
1517 ret = -ENOMEM;
1518 goto out_no_user_srf;
1519 }
1520
1521 *srf_out = &user_srf->srf;
1522 user_srf->size = user_accounting_size;
1523 user_srf->prime.base.shareable = false;
1524 user_srf->prime.base.tfile = NULL;
1525
1526 srf = &user_srf->srf;
1527 srf->flags = svga3d_flags;
1528 srf->format = format;
1529 srf->scanout = for_scanout;
1530 srf->mip_levels[0] = num_mip_levels;
1531 srf->num_sizes = 1;
1532 srf->sizes = NULL;
1533 srf->offsets = NULL;
1534 srf->base_size = size;
1535 srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
1536 srf->array_size = array_size;
1537 srf->multisample_count = multisample_count;
1538
1539 if (array_size)
1540 num_layers = array_size;
1541 else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
1542 num_layers = SVGA3D_MAX_SURFACE_FACES;
1543 else
1544 num_layers = 1;
1545
1546 srf->res.backup_size =
1547 svga3dsurface_get_serialized_size(srf->format,
1548 srf->base_size,
1549 srf->mip_levels[0],
1550 num_layers);
1551
1552 if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
1553 srf->res.backup_size += sizeof(SVGA3dDXSOState);
1554
1555 if (dev_priv->active_display_unit == vmw_du_screen_target &&
1556 for_scanout)
1557 srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
1558
1559 /*
1560 * From this point, the generic resource management functions
1561 * destroy the object on failure.
1562 */
1563 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
1564
1565 ttm_read_unlock(&dev_priv->reservation_sem);
1566 return ret;
1567
1568out_no_user_srf:
1569 ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
1570
1571out_unlock:
1572 ttm_read_unlock(&dev_priv->reservation_sem);
1573 return ret;
1574}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 98d6bfb3a997..e771091d2cd3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index fbc6ee6ca337..52a6fd224127 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -31,6 +31,9 @@
31#include "dev.h" 31#include "dev.h"
32 32
33#define MIPI_CAL_CTRL 0x00 33#define MIPI_CAL_CTRL 0x00
34#define MIPI_CAL_CTRL_NOISE_FILTER(x) (((x) & 0xf) << 26)
35#define MIPI_CAL_CTRL_PRESCALE(x) (((x) & 0x3) << 24)
36#define MIPI_CAL_CTRL_CLKEN_OVR (1 << 4)
34#define MIPI_CAL_CTRL_START (1 << 0) 37#define MIPI_CAL_CTRL_START (1 << 0)
35 38
36#define MIPI_CAL_AUTOCAL_CTRL 0x01 39#define MIPI_CAL_AUTOCAL_CTRL 0x01
@@ -44,15 +47,18 @@
44#define MIPI_CAL_CONFIG_CSIC 0x07 47#define MIPI_CAL_CONFIG_CSIC 0x07
45#define MIPI_CAL_CONFIG_CSID 0x08 48#define MIPI_CAL_CONFIG_CSID 0x08
46#define MIPI_CAL_CONFIG_CSIE 0x09 49#define MIPI_CAL_CONFIG_CSIE 0x09
50#define MIPI_CAL_CONFIG_CSIF 0x0a
47#define MIPI_CAL_CONFIG_DSIA 0x0e 51#define MIPI_CAL_CONFIG_DSIA 0x0e
48#define MIPI_CAL_CONFIG_DSIB 0x0f 52#define MIPI_CAL_CONFIG_DSIB 0x0f
49#define MIPI_CAL_CONFIG_DSIC 0x10 53#define MIPI_CAL_CONFIG_DSIC 0x10
50#define MIPI_CAL_CONFIG_DSID 0x11 54#define MIPI_CAL_CONFIG_DSID 0x11
51 55
52#define MIPI_CAL_CONFIG_DSIAB_CLK 0x19 56#define MIPI_CAL_CONFIG_DSIA_CLK 0x19
53#define MIPI_CAL_CONFIG_DSICD_CLK 0x1a 57#define MIPI_CAL_CONFIG_DSIB_CLK 0x1a
54#define MIPI_CAL_CONFIG_CSIAB_CLK 0x1b 58#define MIPI_CAL_CONFIG_CSIAB_CLK 0x1b
59#define MIPI_CAL_CONFIG_DSIC_CLK 0x1c
55#define MIPI_CAL_CONFIG_CSICD_CLK 0x1c 60#define MIPI_CAL_CONFIG_CSICD_CLK 0x1c
61#define MIPI_CAL_CONFIG_DSID_CLK 0x1d
56#define MIPI_CAL_CONFIG_CSIE_CLK 0x1d 62#define MIPI_CAL_CONFIG_CSIE_CLK 0x1d
57 63
58/* for data and clock lanes */ 64/* for data and clock lanes */
@@ -73,8 +79,11 @@
73 79
74#define MIPI_CAL_BIAS_PAD_CFG1 0x17 80#define MIPI_CAL_BIAS_PAD_CFG1 0x17
75#define MIPI_CAL_BIAS_PAD_DRV_DN_REF(x) (((x) & 0x7) << 16) 81#define MIPI_CAL_BIAS_PAD_DRV_DN_REF(x) (((x) & 0x7) << 16)
82#define MIPI_CAL_BIAS_PAD_DRV_UP_REF(x) (((x) & 0x7) << 8)
76 83
77#define MIPI_CAL_BIAS_PAD_CFG2 0x18 84#define MIPI_CAL_BIAS_PAD_CFG2 0x18
85#define MIPI_CAL_BIAS_PAD_VCLAMP(x) (((x) & 0x7) << 16)
86#define MIPI_CAL_BIAS_PAD_VAUXP(x) (((x) & 0x7) << 4)
78#define MIPI_CAL_BIAS_PAD_PDVREG (1 << 1) 87#define MIPI_CAL_BIAS_PAD_PDVREG (1 << 1)
79 88
80struct tegra_mipi_pad { 89struct tegra_mipi_pad {
@@ -86,13 +95,35 @@ struct tegra_mipi_soc {
86 bool has_clk_lane; 95 bool has_clk_lane;
87 const struct tegra_mipi_pad *pads; 96 const struct tegra_mipi_pad *pads;
88 unsigned int num_pads; 97 unsigned int num_pads;
98
99 bool clock_enable_override;
100 bool needs_vclamp_ref;
101
102 /* bias pad configuration settings */
103 u8 pad_drive_down_ref;
104 u8 pad_drive_up_ref;
105
106 u8 pad_vclamp_level;
107 u8 pad_vauxp_level;
108
109 /* calibration settings for data lanes */
110 u8 hspdos;
111 u8 hspuos;
112 u8 termos;
113
114 /* calibration settings for clock lanes */
115 u8 hsclkpdos;
116 u8 hsclkpuos;
89}; 117};
90 118
91struct tegra_mipi { 119struct tegra_mipi {
92 const struct tegra_mipi_soc *soc; 120 const struct tegra_mipi_soc *soc;
121 struct device *dev;
93 void __iomem *regs; 122 void __iomem *regs;
94 struct mutex lock; 123 struct mutex lock;
95 struct clk *clk; 124 struct clk *clk;
125
126 unsigned long usage_count;
96}; 127};
97 128
98struct tegra_mipi_device { 129struct tegra_mipi_device {
@@ -114,6 +145,67 @@ static inline void tegra_mipi_writel(struct tegra_mipi *mipi, u32 value,
114 writel(value, mipi->regs + (offset << 2)); 145 writel(value, mipi->regs + (offset << 2));
115} 146}
116 147
148static int tegra_mipi_power_up(struct tegra_mipi *mipi)
149{
150 u32 value;
151 int err;
152
153 err = clk_enable(mipi->clk);
154 if (err < 0)
155 return err;
156
157 value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG0);
158 value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
159
160 if (mipi->soc->needs_vclamp_ref)
161 value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
162
163 tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
164
165 value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG2);
166 value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
167 tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
168
169 clk_disable(mipi->clk);
170
171 return 0;
172}
173
174static int tegra_mipi_power_down(struct tegra_mipi *mipi)
175{
176 u32 value;
177 int err;
178
179 err = clk_enable(mipi->clk);
180 if (err < 0)
181 return err;
182
183 /*
184 * The MIPI_CAL_BIAS_PAD_PDVREG controls a voltage regulator that
185 * supplies the DSI pads. This must be kept enabled until none of the
186 * DSI lanes are used anymore.
187 */
188 value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG2);
189 value |= MIPI_CAL_BIAS_PAD_PDVREG;
190 tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
191
192 /*
193 * MIPI_CAL_BIAS_PAD_PDVCLAMP and MIPI_CAL_BIAS_PAD_E_VCLAMP_REF
194 * control a regulator that supplies current to the pre-driver logic.
195 * Powering down this regulator causes DSI to fail, so it must remain
196 * powered on until none of the DSI lanes are used anymore.
197 */
198 value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG0);
199
200 if (mipi->soc->needs_vclamp_ref)
201 value &= ~MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
202
203 value |= MIPI_CAL_BIAS_PAD_PDVCLAMP;
204 tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
205
206 return 0;
207}
208
117struct tegra_mipi_device *tegra_mipi_request(struct device *device) 209struct tegra_mipi_device *tegra_mipi_request(struct device *device)
118{ 210{
119 struct device_node *np = device->of_node; 211 struct device_node *np = device->of_node;
@@ -150,6 +242,20 @@ struct tegra_mipi_device *tegra_mipi_request(struct device *device)
150 dev->pads = args.args[0]; 242 dev->pads = args.args[0];
151 dev->device = device; 243 dev->device = device;
152 244
245 mutex_lock(&dev->mipi->lock);
246
247 if (dev->mipi->usage_count++ == 0) {
248 err = tegra_mipi_power_up(dev->mipi);
249 if (err < 0) {
250 dev_err(dev->mipi->dev,
251 "failed to power up MIPI bricks: %d\n",
252 err);
253 return ERR_PTR(err);
254 }
255 }
256
257 mutex_unlock(&dev->mipi->lock);
258
153 return dev; 259 return dev;
154 260
155put: 261put:
@@ -164,6 +270,25 @@ EXPORT_SYMBOL(tegra_mipi_request);
164 270
165void tegra_mipi_free(struct tegra_mipi_device *device) 271void tegra_mipi_free(struct tegra_mipi_device *device)
166{ 272{
273 int err;
274
275 mutex_lock(&device->mipi->lock);
276
277 if (--device->mipi->usage_count == 0) {
278 err = tegra_mipi_power_down(device->mipi);
279 if (err < 0) {
280 /*
281 * Not much that can be done here, so an error message
282 * will have to do.
283 */
284 dev_err(device->mipi->dev,
285 "failed to power down MIPI bricks: %d\n",
286 err);
287 }
288 }
289
290 mutex_unlock(&device->mipi->lock);
291
167 platform_device_put(device->pdev); 292 platform_device_put(device->pdev);
168 kfree(device); 293 kfree(device);
169} 294}
@@ -199,16 +324,15 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
199 324
200 mutex_lock(&device->mipi->lock); 325 mutex_lock(&device->mipi->lock);
201 326
202 value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG0); 327 value = MIPI_CAL_BIAS_PAD_DRV_DN_REF(soc->pad_drive_down_ref) |
203 value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP; 328 MIPI_CAL_BIAS_PAD_DRV_UP_REF(soc->pad_drive_up_ref);
204 value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF; 329 tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG1);
205 tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
206
207 tegra_mipi_writel(device->mipi, MIPI_CAL_BIAS_PAD_DRV_DN_REF(2),
208 MIPI_CAL_BIAS_PAD_CFG1);
209 330
210 value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2); 331 value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
211 value &= ~MIPI_CAL_BIAS_PAD_PDVREG; 332 value &= ~MIPI_CAL_BIAS_PAD_VCLAMP(0x7);
333 value &= ~MIPI_CAL_BIAS_PAD_VAUXP(0x7);
334 value |= MIPI_CAL_BIAS_PAD_VCLAMP(soc->pad_vclamp_level);
335 value |= MIPI_CAL_BIAS_PAD_VAUXP(soc->pad_vauxp_level);
212 tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2); 336 tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
213 337
214 for (i = 0; i < soc->num_pads; i++) { 338 for (i = 0; i < soc->num_pads; i++) {
@@ -216,21 +340,38 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
216 340
217 if (device->pads & BIT(i)) { 341 if (device->pads & BIT(i)) {
218 data = MIPI_CAL_CONFIG_SELECT | 342 data = MIPI_CAL_CONFIG_SELECT |
219 MIPI_CAL_CONFIG_HSPDOS(0) | 343 MIPI_CAL_CONFIG_HSPDOS(soc->hspdos) |
220 MIPI_CAL_CONFIG_HSPUOS(4) | 344 MIPI_CAL_CONFIG_HSPUOS(soc->hspuos) |
221 MIPI_CAL_CONFIG_TERMOS(5); 345 MIPI_CAL_CONFIG_TERMOS(soc->termos);
222 clk = MIPI_CAL_CONFIG_SELECT | 346 clk = MIPI_CAL_CONFIG_SELECT |
223 MIPI_CAL_CONFIG_HSCLKPDOSD(0) | 347 MIPI_CAL_CONFIG_HSCLKPDOSD(soc->hsclkpdos) |
224 MIPI_CAL_CONFIG_HSCLKPUOSD(4); 348 MIPI_CAL_CONFIG_HSCLKPUOSD(soc->hsclkpuos);
225 } 349 }
226 350
227 tegra_mipi_writel(device->mipi, data, soc->pads[i].data); 351 tegra_mipi_writel(device->mipi, data, soc->pads[i].data);
228 352
229 if (soc->has_clk_lane) 353 if (soc->has_clk_lane && soc->pads[i].clk != 0)
230 tegra_mipi_writel(device->mipi, clk, soc->pads[i].clk); 354 tegra_mipi_writel(device->mipi, clk, soc->pads[i].clk);
231 } 355 }
232 356
233 value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL); 357 value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
358 value &= ~MIPI_CAL_CTRL_NOISE_FILTER(0xf);
359 value &= ~MIPI_CAL_CTRL_PRESCALE(0x3);
360 value |= MIPI_CAL_CTRL_NOISE_FILTER(0xa);
361 value |= MIPI_CAL_CTRL_PRESCALE(0x2);
362
363 if (!soc->clock_enable_override)
364 value &= ~MIPI_CAL_CTRL_CLKEN_OVR;
365 else
366 value |= MIPI_CAL_CTRL_CLKEN_OVR;
367
368 tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
369
370 /* clear any pending status bits */
371 value = tegra_mipi_readl(device->mipi, MIPI_CAL_STATUS);
372 tegra_mipi_writel(device->mipi, value, MIPI_CAL_STATUS);
373
374 value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
234 value |= MIPI_CAL_CTRL_START; 375 value |= MIPI_CAL_CTRL_START;
235 tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL); 376 tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
236 377
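
Before setting MIPI_CAL_CTRL_START, the new code reads MIPI_CAL_STATUS and writes the value straight back, the usual write-1-to-clear idiom for sticky status bits. A sketch of the sequence under assumed register offsets:

#include <stdint.h>

#define CAL_CTRL   0x00 /* offsets are assumptions, not the real map */
#define CAL_STATUS 0x08
#define CAL_START  (1u << 0)

static uint32_t mmio_read(volatile uint32_t *regs, unsigned int off)
{
	return regs[off / 4];
}

static void mmio_write(volatile uint32_t *regs, unsigned int off, uint32_t v)
{
	regs[off / 4] = v;
}

void start_calibration(volatile uint32_t *regs)
{
	uint32_t value;

	/* write-1-to-clear: writing the read-back value clears set bits */
	value = mmio_read(regs, CAL_STATUS);
	mmio_write(regs, CAL_STATUS, value);

	value = mmio_read(regs, CAL_CTRL);
	value |= CAL_START;
	mmio_write(regs, CAL_CTRL, value);
}
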
@@ -259,6 +400,17 @@ static const struct tegra_mipi_soc tegra114_mipi_soc = {
259 .has_clk_lane = false, 400 .has_clk_lane = false,
260 .pads = tegra114_mipi_pads, 401 .pads = tegra114_mipi_pads,
261 .num_pads = ARRAY_SIZE(tegra114_mipi_pads), 402 .num_pads = ARRAY_SIZE(tegra114_mipi_pads),
403 .clock_enable_override = true,
404 .needs_vclamp_ref = true,
405 .pad_drive_down_ref = 0x2,
406 .pad_drive_up_ref = 0x0,
407 .pad_vclamp_level = 0x0,
408 .pad_vauxp_level = 0x0,
409 .hspdos = 0x0,
410 .hspuos = 0x4,
411 .termos = 0x5,
412 .hsclkpdos = 0x0,
413 .hsclkpuos = 0x4,
262}; 414};
263 415
264static const struct tegra_mipi_pad tegra124_mipi_pads[] = { 416static const struct tegra_mipi_pad tegra124_mipi_pads[] = {
@@ -266,20 +418,80 @@ static const struct tegra_mipi_pad tegra124_mipi_pads[] = {
266 { .data = MIPI_CAL_CONFIG_CSIB, .clk = MIPI_CAL_CONFIG_CSIAB_CLK }, 418 { .data = MIPI_CAL_CONFIG_CSIB, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
267 { .data = MIPI_CAL_CONFIG_CSIC, .clk = MIPI_CAL_CONFIG_CSICD_CLK }, 419 { .data = MIPI_CAL_CONFIG_CSIC, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
268 { .data = MIPI_CAL_CONFIG_CSID, .clk = MIPI_CAL_CONFIG_CSICD_CLK }, 420 { .data = MIPI_CAL_CONFIG_CSID, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
269 { .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK }, 421 { .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK },
270 { .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIAB_CLK }, 422 { .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIA_CLK },
271 { .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIAB_CLK }, 423 { .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIB_CLK },
272}; 424};
273 425
274static const struct tegra_mipi_soc tegra124_mipi_soc = { 426static const struct tegra_mipi_soc tegra124_mipi_soc = {
275 .has_clk_lane = true, 427 .has_clk_lane = true,
276 .pads = tegra124_mipi_pads, 428 .pads = tegra124_mipi_pads,
277 .num_pads = ARRAY_SIZE(tegra124_mipi_pads), 429 .num_pads = ARRAY_SIZE(tegra124_mipi_pads),
430 .clock_enable_override = true,
431 .needs_vclamp_ref = true,
432 .pad_drive_down_ref = 0x2,
433 .pad_drive_up_ref = 0x0,
434 .pad_vclamp_level = 0x0,
435 .pad_vauxp_level = 0x0,
436 .hspdos = 0x0,
437 .hspuos = 0x0,
438 .termos = 0x0,
439 .hsclkpdos = 0x1,
440 .hsclkpuos = 0x2,
441};
442
443static const struct tegra_mipi_soc tegra132_mipi_soc = {
444 .has_clk_lane = true,
445 .pads = tegra124_mipi_pads,
446 .num_pads = ARRAY_SIZE(tegra124_mipi_pads),
447 .clock_enable_override = false,
448 .needs_vclamp_ref = false,
449 .pad_drive_down_ref = 0x0,
450 .pad_drive_up_ref = 0x3,
451 .pad_vclamp_level = 0x0,
452 .pad_vauxp_level = 0x0,
453 .hspdos = 0x0,
454 .hspuos = 0x0,
455 .termos = 0x0,
456 .hsclkpdos = 0x3,
457 .hsclkpuos = 0x2,
458};
459
460static const struct tegra_mipi_pad tegra210_mipi_pads[] = {
461 { .data = MIPI_CAL_CONFIG_CSIA, .clk = 0 },
462 { .data = MIPI_CAL_CONFIG_CSIB, .clk = 0 },
463 { .data = MIPI_CAL_CONFIG_CSIC, .clk = 0 },
464 { .data = MIPI_CAL_CONFIG_CSID, .clk = 0 },
465 { .data = MIPI_CAL_CONFIG_CSIE, .clk = 0 },
466 { .data = MIPI_CAL_CONFIG_CSIF, .clk = 0 },
467 { .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIA_CLK },
468 { .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIB_CLK },
469 { .data = MIPI_CAL_CONFIG_DSIC, .clk = MIPI_CAL_CONFIG_DSIC_CLK },
470 { .data = MIPI_CAL_CONFIG_DSID, .clk = MIPI_CAL_CONFIG_DSID_CLK },
471};
472
473static const struct tegra_mipi_soc tegra210_mipi_soc = {
474 .has_clk_lane = true,
475 .pads = tegra210_mipi_pads,
476 .num_pads = ARRAY_SIZE(tegra210_mipi_pads),
477 .clock_enable_override = true,
478 .needs_vclamp_ref = false,
479 .pad_drive_down_ref = 0x0,
480 .pad_drive_up_ref = 0x3,
481 .pad_vclamp_level = 0x1,
482 .pad_vauxp_level = 0x1,
483 .hspdos = 0x0,
484 .hspuos = 0x2,
485 .termos = 0x0,
486 .hsclkpdos = 0x0,
487 .hsclkpuos = 0x2,
278}; 488};
279 489
280static struct of_device_id tegra_mipi_of_match[] = { 490static const struct of_device_id tegra_mipi_of_match[] = {
281 { .compatible = "nvidia,tegra114-mipi", .data = &tegra114_mipi_soc }, 491 { .compatible = "nvidia,tegra114-mipi", .data = &tegra114_mipi_soc },
282 { .compatible = "nvidia,tegra124-mipi", .data = &tegra124_mipi_soc }, 492 { .compatible = "nvidia,tegra124-mipi", .data = &tegra124_mipi_soc },
493 { .compatible = "nvidia,tegra132-mipi", .data = &tegra132_mipi_soc },
494 { .compatible = "nvidia,tegra210-mipi", .data = &tegra210_mipi_soc },
283 { }, 495 { },
284}; 496};
285 497
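
The of_device_id table gains its missing const plus entries for Tegra132 and Tegra210; probe later resolves the matched entry's .data into the per-SoC calibration parameters listed above. A userspace sketch of that lookup, with an illustrative soc_info layout:

#include <stdio.h>
#include <string.h>

struct soc_info { int has_clk_lane; };

static const struct soc_info tegra124 = { .has_clk_lane = 1 };
static const struct soc_info tegra210 = { .has_clk_lane = 1 };

static const struct {
	const char *compatible;
	const struct soc_info *data;
} of_match[] = {
	{ "nvidia,tegra124-mipi", &tegra124 },
	{ "nvidia,tegra210-mipi", &tegra210 },
	{ NULL, NULL }, /* sentinel, as in of_device_id tables */
};

static const struct soc_info *match_soc(const char *compatible)
{
	for (int i = 0; of_match[i].compatible; i++)
		if (!strcmp(of_match[i].compatible, compatible))
			return of_match[i].data;
	return NULL;
}

int main(void)
{
	const struct soc_info *soc = match_soc("nvidia,tegra210-mipi");

	printf("has_clk_lane=%d\n", soc ? soc->has_clk_lane : -1);
	return 0;
}
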
@@ -299,6 +511,7 @@ static int tegra_mipi_probe(struct platform_device *pdev)
299 return -ENOMEM; 511 return -ENOMEM;
300 512
301 mipi->soc = match->data; 513 mipi->soc = match->data;
514 mipi->dev = &pdev->dev;
302 515
303 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 516 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
304 mipi->regs = devm_ioremap_resource(&pdev->dev, res); 517 mipi->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 37ac7b5dbd06..21060668fd25 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -6,17 +6,19 @@
6 * Licensed under GPLv2 6 * Licensed under GPLv2
7 * 7 *
8 * vga_switcheroo.c - Support for laptop with dual GPU using one set of outputs 8 * vga_switcheroo.c - Support for laptop with dual GPU using one set of outputs
9 9 *
10 Switcher interface - methods require for ATPX and DCM 10 * Switcher interface - methods require for ATPX and DCM
11 - switchto - this throws the output MUX switch 11 * - switchto - this throws the output MUX switch
12 - discrete_set_power - sets the power state for the discrete card 12 * - discrete_set_power - sets the power state for the discrete card
13 13 *
14 GPU driver interface 14 * GPU driver interface
15 - set_gpu_state - this should do the equiv of s/r for the card 15 * - set_gpu_state - this should do the equiv of s/r for the card
16 - this should *not* set the discrete power state 16 * - this should *not* set the discrete power state
17 - switch_check - check if the device is in a position to switch now 17 * - switch_check - check if the device is in a position to switch now
18 */ 18 */
19 19
20#define pr_fmt(fmt) "vga_switcheroo: " fmt
21
20#include <linux/module.h> 22#include <linux/module.h>
21#include <linux/seq_file.h> 23#include <linux/seq_file.h>
22#include <linux/uaccess.h> 24#include <linux/uaccess.h>
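
Defining pr_fmt() before the first pr_*() use is what lets all the later hunks drop their hand-written "vga_switcheroo: " prefixes: the printing macros expand every format string through pr_fmt(). In miniature:

#include <stdio.h>

#define pr_fmt(fmt) "vga_switcheroo: " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("enabled\n"); /* prints "vga_switcheroo: enabled" */
	return 0;
}
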
@@ -111,7 +113,7 @@ int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
111 113
112 vgasr_priv.handler = handler; 114 vgasr_priv.handler = handler;
113 if (vga_switcheroo_ready()) { 115 if (vga_switcheroo_ready()) {
114 printk(KERN_INFO "vga_switcheroo: enabled\n"); 116 pr_info("enabled\n");
115 vga_switcheroo_enable(); 117 vga_switcheroo_enable();
116 } 118 }
117 mutex_unlock(&vgasr_mutex); 119 mutex_unlock(&vgasr_mutex);
@@ -124,7 +126,7 @@ void vga_switcheroo_unregister_handler(void)
124 mutex_lock(&vgasr_mutex); 126 mutex_lock(&vgasr_mutex);
125 vgasr_priv.handler = NULL; 127 vgasr_priv.handler = NULL;
126 if (vgasr_priv.active) { 128 if (vgasr_priv.active) {
127 pr_info("vga_switcheroo: disabled\n"); 129 pr_info("disabled\n");
128 vga_switcheroo_debugfs_fini(&vgasr_priv); 130 vga_switcheroo_debugfs_fini(&vgasr_priv);
129 vgasr_priv.active = false; 131 vgasr_priv.active = false;
130 } 132 }
@@ -155,7 +157,7 @@ static int register_client(struct pci_dev *pdev,
155 vgasr_priv.registered_clients++; 157 vgasr_priv.registered_clients++;
156 158
157 if (vga_switcheroo_ready()) { 159 if (vga_switcheroo_ready()) {
158 printk(KERN_INFO "vga_switcheroo: enabled\n"); 160 pr_info("enabled\n");
159 vga_switcheroo_enable(); 161 vga_switcheroo_enable();
160 } 162 }
161 mutex_unlock(&vgasr_mutex); 163 mutex_unlock(&vgasr_mutex);
@@ -167,7 +169,8 @@ int vga_switcheroo_register_client(struct pci_dev *pdev,
167 bool driver_power_control) 169 bool driver_power_control)
168{ 170{
169 return register_client(pdev, ops, -1, 171 return register_client(pdev, ops, -1,
170 pdev == vga_default_device(), driver_power_control); 172 pdev == vga_default_device(),
173 driver_power_control);
171} 174}
172EXPORT_SYMBOL(vga_switcheroo_register_client); 175EXPORT_SYMBOL(vga_switcheroo_register_client);
173 176
@@ -183,6 +186,7 @@ static struct vga_switcheroo_client *
183find_client_from_pci(struct list_head *head, struct pci_dev *pdev) 186find_client_from_pci(struct list_head *head, struct pci_dev *pdev)
184{ 187{
185 struct vga_switcheroo_client *client; 188 struct vga_switcheroo_client *client;
189
186 list_for_each_entry(client, head, list) 190 list_for_each_entry(client, head, list)
187 if (client->pdev == pdev) 191 if (client->pdev == pdev)
188 return client; 192 return client;
@@ -193,6 +197,7 @@ static struct vga_switcheroo_client *
193find_client_from_id(struct list_head *head, int client_id) 197find_client_from_id(struct list_head *head, int client_id)
194{ 198{
195 struct vga_switcheroo_client *client; 199 struct vga_switcheroo_client *client;
200
196 list_for_each_entry(client, head, list) 201 list_for_each_entry(client, head, list)
197 if (client->id == client_id) 202 if (client->id == client_id)
198 return client; 203 return client;
@@ -203,6 +208,7 @@ static struct vga_switcheroo_client *
203find_active_client(struct list_head *head) 208find_active_client(struct list_head *head)
204{ 209{
205 struct vga_switcheroo_client *client; 210 struct vga_switcheroo_client *client;
211
206 list_for_each_entry(client, head, list) 212 list_for_each_entry(client, head, list)
207 if (client->active && client_is_vga(client)) 213 if (client->active && client_is_vga(client))
208 return client; 214 return client;
@@ -235,7 +241,7 @@ void vga_switcheroo_unregister_client(struct pci_dev *pdev)
235 kfree(client); 241 kfree(client);
236 } 242 }
237 if (vgasr_priv.active && vgasr_priv.registered_clients < 2) { 243 if (vgasr_priv.active && vgasr_priv.registered_clients < 2) {
238 printk(KERN_INFO "vga_switcheroo: disabled\n"); 244 pr_info("disabled\n");
239 vga_switcheroo_debugfs_fini(&vgasr_priv); 245 vga_switcheroo_debugfs_fini(&vgasr_priv);
240 vgasr_priv.active = false; 246 vgasr_priv.active = false;
241 } 247 }
@@ -260,10 +266,12 @@ static int vga_switcheroo_show(struct seq_file *m, void *v)
260{ 266{
261 struct vga_switcheroo_client *client; 267 struct vga_switcheroo_client *client;
262 int i = 0; 268 int i = 0;
269
263 mutex_lock(&vgasr_mutex); 270 mutex_lock(&vgasr_mutex);
264 list_for_each_entry(client, &vgasr_priv.clients, list) { 271 list_for_each_entry(client, &vgasr_priv.clients, list) {
265 seq_printf(m, "%d:%s%s:%c:%s%s:%s\n", i, 272 seq_printf(m, "%d:%s%s:%c:%s%s:%s\n", i,
266 client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD", 273 client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" :
274 "IGD",
267 client_is_vga(client) ? "" : "-Audio", 275 client_is_vga(client) ? "" : "-Audio",
268 client->active ? '+' : ' ', 276 client->active ? '+' : ' ',
269 client->driver_power_control ? "Dyn" : "", 277 client->driver_power_control ? "Dyn" : "",
@@ -347,6 +355,7 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
347 355
348 if (new_client->fb_info) { 356 if (new_client->fb_info) {
349 struct fb_event event; 357 struct fb_event event;
358
350 console_lock(); 359 console_lock();
351 event.info = new_client->fb_info; 360 event.info = new_client->fb_info;
352 fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event); 361 fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
@@ -375,7 +384,7 @@ static bool check_can_switch(void)
375 384
376 list_for_each_entry(client, &vgasr_priv.clients, list) { 385 list_for_each_entry(client, &vgasr_priv.clients, list) {
377 if (!client->ops->can_switch(client->pdev)) { 386 if (!client->ops->can_switch(client->pdev)) {
378 printk(KERN_ERR "vga_switcheroo: client %x refused switch\n", client->id); 387 pr_err("client %x refused switch\n", client->id);
379 return false; 388 return false;
380 } 389 }
381 } 390 }
@@ -484,20 +493,20 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
484 if (can_switch) { 493 if (can_switch) {
485 ret = vga_switchto_stage1(client); 494 ret = vga_switchto_stage1(client);
486 if (ret) 495 if (ret)
487 printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret); 496 pr_err("switching failed stage 1 %d\n", ret);
488 497
489 ret = vga_switchto_stage2(client); 498 ret = vga_switchto_stage2(client);
490 if (ret) 499 if (ret)
491 printk(KERN_ERR "vga_switcheroo: switching failed stage 2 %d\n", ret); 500 pr_err("switching failed stage 2 %d\n", ret);
492 501
493 } else { 502 } else {
494 printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id); 503 pr_info("setting delayed switch to client %d\n", client->id);
495 vgasr_priv.delayed_switch_active = true; 504 vgasr_priv.delayed_switch_active = true;
496 vgasr_priv.delayed_client_id = client_id; 505 vgasr_priv.delayed_client_id = client_id;
497 506
498 ret = vga_switchto_stage1(client); 507 ret = vga_switchto_stage1(client);
499 if (ret) 508 if (ret)
500 printk(KERN_ERR "vga_switcheroo: delayed switching stage 1 failed %d\n", ret); 509 pr_err("delayed switching stage 1 failed %d\n", ret);
501 } 510 }
502 511
503out: 512out:
@@ -516,32 +525,32 @@ static const struct file_operations vga_switcheroo_debugfs_fops = {
516 525
517static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv) 526static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv)
518{ 527{
519 if (priv->switch_file) { 528 debugfs_remove(priv->switch_file);
520 debugfs_remove(priv->switch_file); 529 priv->switch_file = NULL;
521 priv->switch_file = NULL; 530
522 } 531 debugfs_remove(priv->debugfs_root);
523 if (priv->debugfs_root) { 532 priv->debugfs_root = NULL;
524 debugfs_remove(priv->debugfs_root);
525 priv->debugfs_root = NULL;
526 }
527} 533}
528 534
529static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv) 535static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
530{ 536{
537 static const char mp[] = "/sys/kernel/debug";
538
531 /* already initialised */ 539 /* already initialised */
532 if (priv->debugfs_root) 540 if (priv->debugfs_root)
533 return 0; 541 return 0;
534 priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL); 542 priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL);
535 543
536 if (!priv->debugfs_root) { 544 if (!priv->debugfs_root) {
537 printk(KERN_ERR "vga_switcheroo: Cannot create /sys/kernel/debug/vgaswitcheroo\n"); 545 pr_err("Cannot create %s/vgaswitcheroo\n", mp);
538 goto fail; 546 goto fail;
539 } 547 }
540 548
541 priv->switch_file = debugfs_create_file("switch", 0644, 549 priv->switch_file = debugfs_create_file("switch", 0644,
542 priv->debugfs_root, NULL, &vga_switcheroo_debugfs_fops); 550 priv->debugfs_root, NULL,
551 &vga_switcheroo_debugfs_fops);
543 if (!priv->switch_file) { 552 if (!priv->switch_file) {
544 printk(KERN_ERR "vga_switcheroo: cannot create /sys/kernel/debug/vgaswitcheroo/switch\n"); 553 pr_err("cannot create %s/vgaswitcheroo/switch\n", mp);
545 goto fail; 554 goto fail;
546 } 555 }
547 return 0; 556 return 0;
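
The fini path above drops its NULL guards because debugfs_remove() accepts a NULL dentry, much like free(); the init path only rewraps long lines. A NULL-safe destructor in the same spirit:

#include <stdlib.h>

struct dentry { int dummy; };

/* NULL-safe, like free() and like the real debugfs_remove() */
static void debugfs_remove_like(struct dentry *d)
{
	if (!d)
		return;
	free(d);
}

void fini(struct dentry **root)
{
	debugfs_remove_like(*root);
	*root = NULL; /* safe to call fini() twice */
}

int main(void)
{
	struct dentry *root = malloc(sizeof(*root));

	fini(&root);
	fini(&root); /* second call is a harmless no-op */
	return 0;
}
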
@@ -560,7 +569,8 @@ int vga_switcheroo_process_delayed_switch(void)
560 if (!vgasr_priv.delayed_switch_active) 569 if (!vgasr_priv.delayed_switch_active)
561 goto err; 570 goto err;
562 571
563 printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id); 572 pr_info("processing delayed switch to %d\n",
573 vgasr_priv.delayed_client_id);
564 574
565 client = find_client_from_id(&vgasr_priv.clients, 575 client = find_client_from_id(&vgasr_priv.clients,
566 vgasr_priv.delayed_client_id); 576 vgasr_priv.delayed_client_id);
@@ -569,7 +579,7 @@ int vga_switcheroo_process_delayed_switch(void)
569 579
570 ret = vga_switchto_stage2(client); 580 ret = vga_switchto_stage2(client);
571 if (ret) 581 if (ret)
572 printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret); 582 pr_err("delayed switching failed stage 2 %d\n", ret);
573 583
574 vgasr_priv.delayed_switch_active = false; 584 vgasr_priv.delayed_switch_active = false;
575 err = 0; 585 err = 0;
@@ -579,7 +589,8 @@ err:
579} 589}
580EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch); 590EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
581 591
582static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switcheroo_state state) 592static void vga_switcheroo_power_switch(struct pci_dev *pdev,
593 enum vga_switcheroo_state state)
583{ 594{
584 struct vga_switcheroo_client *client; 595 struct vga_switcheroo_client *client;
585 596
@@ -598,7 +609,8 @@ static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switchero
598 609
599/* force a PCI device to a certain state - mainly to turn off audio clients */ 610/* force a PCI device to a certain state - mainly to turn off audio clients */
600 611
601void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) 612void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev,
613 enum vga_switcheroo_state dynamic)
602{ 614{
603 struct vga_switcheroo_client *client; 615 struct vga_switcheroo_client *client;
604 616
@@ -644,7 +656,8 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
644 656
645/* this version is for the case where the power switch is separate 657/* this version is for the case where the power switch is separate
646 to the device being powered down. */ 658 to the device being powered down. */
647int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) 659int vga_switcheroo_init_domain_pm_ops(struct device *dev,
660 struct dev_pm_domain *domain)
648{ 661{
649 /* copy over all the bus versions */ 662 /* copy over all the bus versions */
650 if (dev->bus && dev->bus->pm) { 663 if (dev->bus && dev->bus->pm) {
@@ -675,7 +688,8 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
675 /* we need to check if we have to switch back on the video 688 /* we need to check if we have to switch back on the video
676 device so the audio device can come back */ 689 device so the audio device can come back */
677 list_for_each_entry(client, &vgasr_priv.clients, list) { 690 list_for_each_entry(client, &vgasr_priv.clients, list) {
678 if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && client_is_vga(client)) { 691 if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) &&
692 client_is_vga(client)) {
679 found = client; 693 found = client;
680 ret = pm_runtime_get_sync(&client->pdev->dev); 694 ret = pm_runtime_get_sync(&client->pdev->dev);
681 if (ret) { 695 if (ret) {
@@ -695,12 +709,15 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
695 return ret; 709 return ret;
696} 710}
697 711
698int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) 712int
713vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev,
714 struct dev_pm_domain *domain)
699{ 715{
700 /* copy over all the bus versions */ 716 /* copy over all the bus versions */
701 if (dev->bus && dev->bus->pm) { 717 if (dev->bus && dev->bus->pm) {
702 domain->ops = *dev->bus->pm; 718 domain->ops = *dev->bus->pm;
703 domain->ops.runtime_resume = vga_switcheroo_runtime_resume_hdmi_audio; 719 domain->ops.runtime_resume =
720 vga_switcheroo_runtime_resume_hdmi_audio;
704 721
705 dev->pm_domain = domain; 722 dev->pm_domain = domain;
706 return 0; 723 return 0;
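
Both pm-domain helpers above only gain line wrapping, but the pattern they share is worth seeing in isolation: copy the bus's dev_pm_ops by value, then override a single callback. A sketch with simplified stand-in types:

#include <stdio.h>

struct pm_ops {
	int (*runtime_resume)(void);
	int (*runtime_suspend)(void);
};

static int bus_resume(void)  { return 0; }
static int bus_suspend(void) { return 0; }

static int audio_resume(void)
{
	puts("hdmi-audio resume");
	return 0;
}

static const struct pm_ops bus_pm = {
	.runtime_resume  = bus_resume,
	.runtime_suspend = bus_suspend,
};

int main(void)
{
	struct pm_ops domain = bus_pm;        /* copy every bus callback */

	domain.runtime_resume = audio_resume; /* then override just one */
	return domain.runtime_resume();
}
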
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 7bcbf863656e..a0b433456107 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -29,6 +29,8 @@
29 * 29 *
30 */ 30 */
31 31
32#define pr_fmt(fmt) "vgaarb: " fmt
33
32#include <linux/module.h> 34#include <linux/module.h>
33#include <linux/kernel.h> 35#include <linux/kernel.h>
34#include <linux/pci.h> 36#include <linux/pci.h>
@@ -134,7 +136,6 @@ struct pci_dev *vga_default_device(void)
134{ 136{
135 return vga_default; 137 return vga_default;
136} 138}
137
138EXPORT_SYMBOL_GPL(vga_default_device); 139EXPORT_SYMBOL_GPL(vga_default_device);
139 140
140void vga_set_default_device(struct pci_dev *pdev) 141void vga_set_default_device(struct pci_dev *pdev)
@@ -298,9 +299,9 @@ enable_them:
298 299
299 pci_set_vga_state(vgadev->pdev, true, pci_bits, flags); 300 pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);
300 301
301 if (!vgadev->bridge_has_one_vga) { 302 if (!vgadev->bridge_has_one_vga)
302 vga_irq_set_state(vgadev, true); 303 vga_irq_set_state(vgadev, true);
303 } 304
304 vgadev->owns |= wants; 305 vgadev->owns |= wants;
305lock_them: 306lock_them:
306 vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK); 307 vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
@@ -452,15 +453,15 @@ bail:
452} 453}
453EXPORT_SYMBOL(vga_put); 454EXPORT_SYMBOL(vga_put);
454 455
455/* Rules for using a bridge to control a VGA descendant decoding: 456/*
456 if a bridge has only one VGA descendant then it can be used 457 * Rules for using a bridge to control a VGA descendant decoding: if a bridge
457 to control the VGA routing for that device. 458 * has only one VGA descendant then it can be used to control the VGA routing
458 It should always use the bridge closest to the device to control it. 459 * for that device. It should always use the bridge closest to the device to
459 If a bridge has a direct VGA descendant, but also have a sub-bridge 460 * control it. If a bridge has a direct VGA descendant, but also have a sub-
460 VGA descendant then we cannot use that bridge to control the direct VGA descendant. 461 * bridge VGA descendant then we cannot use that bridge to control the direct
461 So for every device we register, we need to iterate all its parent bridges 462 * VGA descendant. So for every device we register, we need to iterate all
462 so we can invalidate any devices using them properly. 463 * its parent bridges so we can invalidate any devices using them properly.
463*/ 464 */
464static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev) 465static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
465{ 466{
466 struct vga_device *same_bridge_vgadev; 467 struct vga_device *same_bridge_vgadev;
@@ -484,21 +485,26 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
484 485
485 /* see if the share a bridge with this device */ 486 /* see if the share a bridge with this device */
486 if (new_bridge == bridge) { 487 if (new_bridge == bridge) {
487 /* if their direct parent bridge is the same 488 /*
488 as any bridge of this device then it can't be used 489 * If their direct parent bridge is the same
489 for that device */ 490 * as any bridge of this device then it can't
491 * be used for that device.
492 */
490 same_bridge_vgadev->bridge_has_one_vga = false; 493 same_bridge_vgadev->bridge_has_one_vga = false;
491 } 494 }
492 495
493 /* now iterate the previous devices bridge hierarchy */ 496 /*
494 /* if the new devices parent bridge is in the other devices 497 * Now iterate the previous devices bridge hierarchy.
495 hierarchy then we can't use it to control this device */ 498 * If the new devices parent bridge is in the other
499 * devices hierarchy then we can't use it to control
500 * this device
501 */
496 while (bus) { 502 while (bus) {
497 bridge = bus->self; 503 bridge = bus->self;
498 if (bridge) { 504
499 if (bridge == vgadev->pdev->bus->self) 505 if (bridge && bridge == vgadev->pdev->bus->self)
500 vgadev->bridge_has_one_vga = false; 506 vgadev->bridge_has_one_vga = false;
501 } 507
502 bus = bus->parent; 508 bus = bus->parent;
503 } 509 }
504 } 510 }
@@ -527,10 +533,10 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
527 /* Allocate structure */ 533 /* Allocate structure */
528 vgadev = kmalloc(sizeof(struct vga_device), GFP_KERNEL); 534 vgadev = kmalloc(sizeof(struct vga_device), GFP_KERNEL);
529 if (vgadev == NULL) { 535 if (vgadev == NULL) {
530 pr_err("vgaarb: failed to allocate pci device\n"); 536 pr_err("failed to allocate pci device\n");
531 /* What to do on allocation failure ? For now, let's 537 /*
532 * just do nothing, I'm not sure there is anything saner 538 * What to do on allocation failure ? For now, let's just do
533 * to be done 539 * nothing, I'm not sure there is anything saner to be done.
534 */ 540 */
535 return false; 541 return false;
536 } 542 }
@@ -566,8 +572,8 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
566 bridge = bus->self; 572 bridge = bus->self;
567 if (bridge) { 573 if (bridge) {
568 u16 l; 574 u16 l;
569 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, 575
570 &l); 576 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &l);
571 if (!(l & PCI_BRIDGE_CTL_VGA)) { 577 if (!(l & PCI_BRIDGE_CTL_VGA)) {
572 vgadev->owns = 0; 578 vgadev->owns = 0;
573 break; 579 break;
@@ -581,8 +587,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
581 */ 587 */
582 if (vga_default == NULL && 588 if (vga_default == NULL &&
583 ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) { 589 ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
584 pr_info("vgaarb: setting as boot device: PCI:%s\n", 590 pr_info("setting as boot device: PCI:%s\n", pci_name(pdev));
585 pci_name(pdev));
586 vga_set_default_device(pdev); 591 vga_set_default_device(pdev);
587 } 592 }
588 593
@@ -591,7 +596,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
591 /* Add to the list */ 596 /* Add to the list */
592 list_add(&vgadev->list, &vga_list); 597 list_add(&vgadev->list, &vga_list);
593 vga_count++; 598 vga_count++;
594 pr_info("vgaarb: device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n", 599 pr_info("device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
595 pci_name(pdev), 600 pci_name(pdev),
596 vga_iostate_to_str(vgadev->decodes), 601 vga_iostate_to_str(vgadev->decodes),
597 vga_iostate_to_str(vgadev->owns), 602 vga_iostate_to_str(vgadev->owns),
@@ -651,7 +656,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
651 decodes_unlocked = vgadev->locks & decodes_removed; 656 decodes_unlocked = vgadev->locks & decodes_removed;
652 vgadev->decodes = new_decodes; 657 vgadev->decodes = new_decodes;
653 658
654 pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n", 659 pr_info("device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
655 pci_name(vgadev->pdev), 660 pci_name(vgadev->pdev),
656 vga_iostate_to_str(old_decodes), 661 vga_iostate_to_str(old_decodes),
657 vga_iostate_to_str(vgadev->decodes), 662 vga_iostate_to_str(vgadev->decodes),
@@ -673,10 +678,12 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
673 if (!(old_decodes & VGA_RSRC_LEGACY_MASK) && 678 if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
674 new_decodes & VGA_RSRC_LEGACY_MASK) 679 new_decodes & VGA_RSRC_LEGACY_MASK)
675 vga_decode_count++; 680 vga_decode_count++;
676 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); 681 pr_debug("decoding count now is: %d\n", vga_decode_count);
677} 682}
678 683
679static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) 684static void __vga_set_legacy_decoding(struct pci_dev *pdev,
685 unsigned int decodes,
686 bool userspace)
680{ 687{
681 struct vga_device *vgadev; 688 struct vga_device *vgadev;
682 unsigned long flags; 689 unsigned long flags;
@@ -712,7 +719,8 @@ EXPORT_SYMBOL(vga_set_legacy_decoding);
712/* call with NULL to unregister */ 719/* call with NULL to unregister */
713int vga_client_register(struct pci_dev *pdev, void *cookie, 720int vga_client_register(struct pci_dev *pdev, void *cookie,
714 void (*irq_set_state)(void *cookie, bool state), 721 void (*irq_set_state)(void *cookie, bool state),
715 unsigned int (*set_vga_decode)(void *cookie, bool decode)) 722 unsigned int (*set_vga_decode)(void *cookie,
723 bool decode))
716{ 724{
717 int ret = -ENODEV; 725 int ret = -ENODEV;
718 struct vga_device *vgadev; 726 struct vga_device *vgadev;
@@ -832,7 +840,7 @@ static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain,
832 return 1; 840 return 1;
833} 841}
834 842
835static ssize_t vga_arb_read(struct file *file, char __user * buf, 843static ssize_t vga_arb_read(struct file *file, char __user *buf,
836 size_t count, loff_t *ppos) 844 size_t count, loff_t *ppos)
837{ 845{
838 struct vga_arb_private *priv = file->private_data; 846 struct vga_arb_private *priv = file->private_data;
@@ -899,7 +907,7 @@ done:
899 * TODO: To avoid parsing inside kernel and to improve the speed we may 907 * TODO: To avoid parsing inside kernel and to improve the speed we may
900 * consider use ioctl here 908 * consider use ioctl here
901 */ 909 */
902static ssize_t vga_arb_write(struct file *file, const char __user * buf, 910static ssize_t vga_arb_write(struct file *file, const char __user *buf,
903 size_t count, loff_t *ppos) 911 size_t count, loff_t *ppos)
904{ 912{
905 struct vga_arb_private *priv = file->private_data; 913 struct vga_arb_private *priv = file->private_data;
@@ -1075,13 +1083,13 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
1075 ret_val = -EPROTO; 1083 ret_val = -EPROTO;
1076 goto done; 1084 goto done;
1077 } 1085 }
1078 pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos, 1086 pr_debug("%s ==> %x:%x:%x.%x\n", curr_pos,
1079 domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 1087 domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1080 1088
1081 pdev = pci_get_domain_bus_and_slot(domain, bus, devfn); 1089 pdev = pci_get_domain_bus_and_slot(domain, bus, devfn);
1082 pr_debug("vgaarb: pdev %p\n", pdev); 1090 pr_debug("pdev %p\n", pdev);
1083 if (!pdev) { 1091 if (!pdev) {
1084 pr_err("vgaarb: invalid PCI address %x:%x:%x\n", 1092 pr_err("invalid PCI address %x:%x:%x\n",
1085 domain, bus, devfn); 1093 domain, bus, devfn);
1086 ret_val = -ENODEV; 1094 ret_val = -ENODEV;
1087 goto done; 1095 goto done;
@@ -1089,10 +1097,13 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
1089 } 1097 }
1090 1098
1091 vgadev = vgadev_find(pdev); 1099 vgadev = vgadev_find(pdev);
1092 pr_debug("vgaarb: vgadev %p\n", vgadev); 1100 pr_debug("vgadev %p\n", vgadev);
1093 if (vgadev == NULL) { 1101 if (vgadev == NULL) {
1094 pr_err("vgaarb: this pci device is not a vga device\n"); 1102 if (pdev) {
1095 pci_dev_put(pdev); 1103 pr_err("this pci device is not a vga device\n");
1104 pci_dev_put(pdev);
1105 }
1106
1096 ret_val = -ENODEV; 1107 ret_val = -ENODEV;
1097 goto done; 1108 goto done;
1098 } 1109 }
@@ -1109,7 +1120,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
1109 } 1120 }
1110 } 1121 }
1111 if (i == MAX_USER_CARDS) { 1122 if (i == MAX_USER_CARDS) {
1112 pr_err("vgaarb: maximum user cards (%d) number reached!\n", 1123 pr_err("maximum user cards (%d) number reached!\n",
1113 MAX_USER_CARDS); 1124 MAX_USER_CARDS);
1114 pci_dev_put(pdev); 1125 pci_dev_put(pdev);
1115 /* XXX: which value to return? */ 1126 /* XXX: which value to return? */
@@ -1125,7 +1136,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
1125 } else if (strncmp(curr_pos, "decodes ", 8) == 0) { 1136 } else if (strncmp(curr_pos, "decodes ", 8) == 0) {
1126 curr_pos += 8; 1137 curr_pos += 8;
1127 remaining -= 8; 1138 remaining -= 8;
1128 pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv); 1139 pr_debug("client 0x%p called 'decodes'\n", priv);
1129 1140
1130 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { 1141 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
1131 ret_val = -EPROTO; 1142 ret_val = -EPROTO;
@@ -1150,7 +1161,7 @@ done:
1150 return ret_val; 1161 return ret_val;
1151} 1162}
1152 1163
1153static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait) 1164static unsigned int vga_arb_fpoll(struct file *file, poll_table *wait)
1154{ 1165{
1155 struct vga_arb_private *priv = file->private_data; 1166 struct vga_arb_private *priv = file->private_data;
1156 1167
@@ -1246,7 +1257,8 @@ static void vga_arbiter_notify_clients(void)
1246 else 1257 else
1247 new_state = true; 1258 new_state = true;
1248 if (vgadev->set_vga_decode) { 1259 if (vgadev->set_vga_decode) {
1249 new_decodes = vgadev->set_vga_decode(vgadev->cookie, new_state); 1260 new_decodes = vgadev->set_vga_decode(vgadev->cookie,
1261 new_state);
1250 vga_update_device_decodes(vgadev, new_decodes); 1262 vga_update_device_decodes(vgadev, new_decodes);
1251 } 1263 }
1252 } 1264 }
@@ -1300,7 +1312,7 @@ static int __init vga_arb_device_init(void)
1300 1312
1301 rc = misc_register(&vga_arb_device); 1313 rc = misc_register(&vga_arb_device);
1302 if (rc < 0) 1314 if (rc < 0)
1303 pr_err("vgaarb: error %d registering device\n", rc); 1315 pr_err("error %d registering device\n", rc);
1304 1316
1305 bus_register_notifier(&pci_bus_type, &pci_notifier); 1317 bus_register_notifier(&pci_bus_type, &pci_notifier);
1306 1318
@@ -1312,21 +1324,29 @@ static int __init vga_arb_device_init(void)
1312 PCI_ANY_ID, pdev)) != NULL) 1324 PCI_ANY_ID, pdev)) != NULL)
1313 vga_arbiter_add_pci_device(pdev); 1325 vga_arbiter_add_pci_device(pdev);
1314 1326
1315 pr_info("vgaarb: loaded\n"); 1327 pr_info("loaded\n");
1316 1328
1317 list_for_each_entry(vgadev, &vga_list, list) { 1329 list_for_each_entry(vgadev, &vga_list, list) {
1318#if defined(CONFIG_X86) || defined(CONFIG_IA64) 1330#if defined(CONFIG_X86) || defined(CONFIG_IA64)
1319 /* Override I/O based detection done by vga_arbiter_add_pci_device() 1331 /*
1320 * as it may take the wrong device (e.g. on Apple system under EFI). 1332 * Override vga_arbiter_add_pci_device()'s I/O based detection
1333 * as it may take the wrong device (e.g. on Apple system under
1334 * EFI).
1321 * 1335 *
1322 * Select the device owning the boot framebuffer if there is one. 1336 * Select the device owning the boot framebuffer if there is
1337 * one.
1323 */ 1338 */
1324 resource_size_t start, end; 1339 resource_size_t start, end, limit;
1340 unsigned long flags;
1325 int i; 1341 int i;
1326 1342
1343 limit = screen_info.lfb_base + screen_info.lfb_size;
1344
1327 /* Does firmware framebuffer belong to us? */ 1345 /* Does firmware framebuffer belong to us? */
1328 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 1346 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1329 if (!(pci_resource_flags(vgadev->pdev, i) & IORESOURCE_MEM)) 1347 flags = pci_resource_flags(vgadev->pdev, i);
1348
1349 if ((flags & IORESOURCE_MEM) == 0)
1330 continue; 1350 continue;
1331 1351
1332 start = pci_resource_start(vgadev->pdev, i); 1352 start = pci_resource_start(vgadev->pdev, i);
@@ -1335,22 +1355,24 @@ static int __init vga_arb_device_init(void)
1335 if (!start || !end) 1355 if (!start || !end)
1336 continue; 1356 continue;
1337 1357
1338 if (screen_info.lfb_base < start || 1358 if (screen_info.lfb_base < start || limit >= end)
1339 (screen_info.lfb_base + screen_info.lfb_size) >= end)
1340 continue; 1359 continue;
1360
1341 if (!vga_default_device()) 1361 if (!vga_default_device())
1342 pr_info("vgaarb: setting as boot device: PCI:%s\n", 1362 pr_info("setting as boot device: PCI:%s\n",
1343 pci_name(vgadev->pdev)); 1363 pci_name(vgadev->pdev));
1344 else if (vgadev->pdev != vga_default_device()) 1364 else if (vgadev->pdev != vga_default_device())
1345 pr_info("vgaarb: overriding boot device: PCI:%s\n", 1365 pr_info("overriding boot device: PCI:%s\n",
1346 pci_name(vgadev->pdev)); 1366 pci_name(vgadev->pdev));
1347 vga_set_default_device(vgadev->pdev); 1367 vga_set_default_device(vgadev->pdev);
1348 } 1368 }
1349#endif 1369#endif
1350 if (vgadev->bridge_has_one_vga) 1370 if (vgadev->bridge_has_one_vga)
1351 pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev)); 1371 pr_info("bridge control possible %s\n",
1372 pci_name(vgadev->pdev));
1352 else 1373 else
1353 pr_info("vgaarb: no bridge control possible %s\n", pci_name(vgadev->pdev)); 1374 pr_info("no bridge control possible %s\n",
1375 pci_name(vgadev->pdev));
1354 } 1376 }
1355 return rc; 1377 return rc;
1356} 1378}
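
The last vgaarb hunk hoists screen_info.lfb_base + screen_info.lfb_size into 'limit' and keeps a device only when the firmware framebuffer sits inside one of its memory BARs. The range test on its own, with hypothetical addresses:

#include <stdbool.h>
#include <stdint.h>

/* mirrors: if (screen_info.lfb_base < start || limit >= end) continue; */
static bool fb_within_bar(uint64_t lfb_base, uint64_t lfb_size,
			  uint64_t start, uint64_t end)
{
	uint64_t limit = lfb_base + lfb_size;

	return lfb_base >= start && limit < end;
}

int main(void)
{
	/* hypothetical: 8 MiB framebuffer inside a 256 MiB BAR */
	return fb_within_bar(0xd0000000, 8 << 20,
			     0xd0000000, 0xe0000000) ? 0 : 1;
}
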
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 3511bbaba505..e3c63640df73 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -462,12 +462,15 @@ out:
462 462
463static void hidinput_cleanup_battery(struct hid_device *dev) 463static void hidinput_cleanup_battery(struct hid_device *dev)
464{ 464{
465 const struct power_supply_desc *psy_desc;
466
465 if (!dev->battery) 467 if (!dev->battery)
466 return; 468 return;
467 469
470 psy_desc = dev->battery->desc;
468 power_supply_unregister(dev->battery); 471 power_supply_unregister(dev->battery);
469 kfree(dev->battery->desc->name); 472 kfree(psy_desc->name);
470 kfree(dev->battery->desc); 473 kfree(psy_desc);
471 dev->battery = NULL; 474 dev->battery = NULL;
472} 475}
473#else /* !CONFIG_HID_BATTERY_STRENGTH */ 476#else /* !CONFIG_HID_BATTERY_STRENGTH */
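
The hid-input change above is a use-after-free repair: power_supply_unregister() frees the battery object, so the descriptor pointer must be saved to a local before the call. The same bug and fix in miniature:

#include <stdlib.h>
#include <string.h>

struct desc { char *name; };
struct battery { struct desc *desc; };

/* stand-in for power_supply_unregister(): frees the battery object */
static void unregister_battery(struct battery *b)
{
	free(b); /* after this, b->desc would be a use-after-free */
}

static void cleanup(struct battery *b)
{
	struct desc *d = b->desc; /* save before the object dies */

	unregister_battery(b);
	free(d->name);
	free(d);
}

int main(void)
{
	struct battery *b = malloc(sizeof(*b));

	b->desc = malloc(sizeof(*b->desc));
	b->desc->name = strdup("hid-battery");
	cleanup(b);
	return 0;
}
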
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 94167310e15a..b905d501e752 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
858 for (p = drvdata->rdesc; 858 for (p = drvdata->rdesc;
859 p <= drvdata->rdesc + drvdata->rsize - 4;) { 859 p <= drvdata->rdesc + drvdata->rsize - 4;) {
860 if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D && 860 if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
861 p[3] < sizeof(params)) { 861 p[3] < ARRAY_SIZE(params)) {
862 v = params[p[3]]; 862 v = params[p[3]];
863 put_unaligned(cpu_to_le32(v), (s32 *)p); 863 put_unaligned(cpu_to_le32(v), (s32 *)p);
864 p += 4; 864 p += 4;
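
The uclogic one-liner swaps sizeof() for ARRAY_SIZE(): for an array of 32-bit values, sizeof() is four times the element count, so the old bound let p[3] index past the end of params. Demonstrated:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	uint32_t params[4] = { 0 };

	/* 16 bytes vs 4 elements: the old bound over-read by 4x */
	printf("sizeof=%zu ARRAY_SIZE=%zu\n",
	       sizeof(params), ARRAY_SIZE(params));
	return 0;
}
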
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 44958d79d598..01b937e63cf3 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1284,6 +1284,39 @@ fail_register_pen_input:
1284 return error; 1284 return error;
1285} 1285}
1286 1286
1287/*
1288 * Not all devices report physical dimensions from HID.
1289 * Compute the default from hardcoded logical dimension
1290 * and resolution before driver overwrites them.
1291 */
1292static void wacom_set_default_phy(struct wacom_features *features)
1293{
1294 if (features->x_resolution) {
1295 features->x_phy = (features->x_max * 100) /
1296 features->x_resolution;
1297 features->y_phy = (features->y_max * 100) /
1298 features->y_resolution;
1299 }
1300}
1301
1302static void wacom_calculate_res(struct wacom_features *features)
1303{
1304 /* set unit to "100th of a mm" for devices not reported by HID */
1305 if (!features->unit) {
1306 features->unit = 0x11;
1307 features->unitExpo = -3;
1308 }
1309
1310 features->x_resolution = wacom_calc_hid_res(features->x_max,
1311 features->x_phy,
1312 features->unit,
1313 features->unitExpo);
1314 features->y_resolution = wacom_calc_hid_res(features->y_max,
1315 features->y_phy,
1316 features->unit,
1317 features->unitExpo);
1318}
1319
1287static void wacom_wireless_work(struct work_struct *work) 1320static void wacom_wireless_work(struct work_struct *work)
1288{ 1321{
1289 struct wacom *wacom = container_of(work, struct wacom, work); 1322 struct wacom *wacom = container_of(work, struct wacom, work);
@@ -1341,6 +1374,8 @@ static void wacom_wireless_work(struct work_struct *work)
1341 if (wacom_wac1->features.type != INTUOSHT && 1374 if (wacom_wac1->features.type != INTUOSHT &&
1342 wacom_wac1->features.type != BAMBOO_PT) 1375 wacom_wac1->features.type != BAMBOO_PT)
1343 wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD; 1376 wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD;
1377 wacom_set_default_phy(&wacom_wac1->features);
1378 wacom_calculate_res(&wacom_wac1->features);
1344 snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen", 1379 snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen",
1345 wacom_wac1->features.name); 1380 wacom_wac1->features.name);
1346 snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad", 1381 snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad",
@@ -1359,7 +1394,9 @@ static void wacom_wireless_work(struct work_struct *work)
1359 wacom_wac2->features = 1394 wacom_wac2->features =
1360 *((struct wacom_features *)id->driver_data); 1395 *((struct wacom_features *)id->driver_data);
1361 wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3; 1396 wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
1397 wacom_set_default_phy(&wacom_wac2->features);
1362 wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096; 1398 wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
1399 wacom_calculate_res(&wacom_wac2->features);
1363 snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX, 1400 snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX,
1364 "%s (WL) Finger",wacom_wac2->features.name); 1401 "%s (WL) Finger",wacom_wac2->features.name);
1365 snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX, 1402 snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
@@ -1407,39 +1444,6 @@ void wacom_battery_work(struct work_struct *work)
1407 } 1444 }
1408} 1445}
1409 1446
1410/*
1411 * Not all devices report physical dimensions from HID.
1412 * Compute the default from hardcoded logical dimension
1413 * and resolution before driver overwrites them.
1414 */
1415static void wacom_set_default_phy(struct wacom_features *features)
1416{
1417 if (features->x_resolution) {
1418 features->x_phy = (features->x_max * 100) /
1419 features->x_resolution;
1420 features->y_phy = (features->y_max * 100) /
1421 features->y_resolution;
1422 }
1423}
1424
1425static void wacom_calculate_res(struct wacom_features *features)
1426{
1427 /* set unit to "100th of a mm" for devices not reported by HID */
1428 if (!features->unit) {
1429 features->unit = 0x11;
1430 features->unitExpo = -3;
1431 }
1432
1433 features->x_resolution = wacom_calc_hid_res(features->x_max,
1434 features->x_phy,
1435 features->unit,
1436 features->unitExpo);
1437 features->y_resolution = wacom_calc_hid_res(features->y_max,
1438 features->y_phy,
1439 features->unit,
1440 features->unitExpo);
1441}
1442
1443static size_t wacom_compute_pktlen(struct hid_device *hdev) 1447static size_t wacom_compute_pktlen(struct hid_device *hdev)
1444{ 1448{
1445 struct hid_report_enum *report_enum; 1449 struct hid_report_enum *report_enum;
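
The wacom hunks move wacom_set_default_phy() and wacom_calculate_res() above wacom_wireless_work() and call them there, so wirelessly attached tablets also get physical dimensions derived from the logical maximum and resolution. The relation, with hypothetical numbers:

#include <stdio.h>

/* phy in hundredths of a unit, resolution in counts per unit */
static int default_phy(int logical_max, int resolution)
{
	return (logical_max * 100) / resolution;
}

int main(void)
{
	/* hypothetical tablet: 21648 counts at 100 counts per mm */
	printf("x_phy = %d\n", default_phy(21648, 100)); /* 21648 */
	return 0;
}
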
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index c7aab48f07cd..92d518382a9f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -814,7 +814,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
814 printk(KERN_ERR MOD 814 printk(KERN_ERR MOD
815 "Unexpected cqe_status 0x%x for QPID=0x%0x\n", 815 "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
816 CQE_STATUS(&cqe), CQE_QPID(&cqe)); 816 CQE_STATUS(&cqe), CQE_QPID(&cqe));
817 ret = -EINVAL; 817 wc->status = IB_WC_FATAL_ERR;
818 } 818 }
819 } 819 }
820out: 820out:
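
The cxgb4 change stops aborting the poll with -EINVAL on an unexpected CQE status and instead delivers the completion with a fatal work-completion status, which is how verbs consumers expect errors to arrive. Sketched with simplified types:

#include <stdio.h>

enum wc_status { WC_SUCCESS, WC_FATAL_ERR };

struct wc { enum wc_status status; };

void complete_one(struct wc *wc, unsigned int cqe_status)
{
	if (cqe_status) {
		fprintf(stderr, "unexpected cqe_status 0x%x\n", cqe_status);
		wc->status = WC_FATAL_ERR; /* was: abort poll with -EINVAL */
		return;
	}
	wc->status = WC_SUCCESS;
}

int main(void)
{
	struct wc wc;

	complete_one(&wc, 0x5);
	return wc.status == WC_FATAL_ERR ? 0 : 1;
}
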
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 097d7216d98e..c6dc644aa580 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -246,7 +246,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
246 * convert it to descriptor. 246 * convert it to descriptor.
247 */ 247 */
248 if (!button->gpiod && gpio_is_valid(button->gpio)) { 248 if (!button->gpiod && gpio_is_valid(button->gpio)) {
249 unsigned flags = 0; 249 unsigned flags = GPIOF_IN;
250 250
251 if (button->active_low) 251 if (button->active_low)
252 flags |= GPIOF_ACTIVE_LOW; 252 flags |= GPIOF_ACTIVE_LOW;
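
Seeding flags with GPIOF_IN instead of 0 requests the legacy GPIO as an input before it is polled; active-low is still OR'ed in afterwards. With illustrative flag values:

#include <stdio.h>

#define GPIOF_IN         (1u << 0) /* values are illustrative */
#define GPIOF_ACTIVE_LOW (1u << 1)

int main(void)
{
	unsigned int flags = GPIOF_IN; /* was 0: direction undefined */
	int active_low = 1;

	if (active_low)
		flags |= GPIOF_ACTIVE_LOW;

	printf("request flags = 0x%x\n", flags);
	return 0;
}
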
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 692fe2bc8197..c12bb93334ff 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -68,7 +68,9 @@ static struct irq_chip crossbar_chip = {
68 .irq_mask = irq_chip_mask_parent, 68 .irq_mask = irq_chip_mask_parent,
69 .irq_unmask = irq_chip_unmask_parent, 69 .irq_unmask = irq_chip_unmask_parent,
70 .irq_retrigger = irq_chip_retrigger_hierarchy, 70 .irq_retrigger = irq_chip_retrigger_hierarchy,
71 .irq_set_wake = irq_chip_set_wake_parent, 71 .irq_set_type = irq_chip_set_type_parent,
72 .flags = IRQCHIP_MASK_ON_SUSPEND |
73 IRQCHIP_SKIP_SET_WAKE,
72#ifdef CONFIG_SMP 74#ifdef CONFIG_SMP
73 .irq_set_affinity = irq_chip_set_affinity_parent, 75 .irq_set_affinity = irq_chip_set_affinity_parent,
74#endif 76#endif
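
The crossbar chip now forwards .irq_set_type to its parent domain and replaces the .irq_set_wake callback with the IRQCHIP_SKIP_SET_WAKE and IRQCHIP_MASK_ON_SUSPEND flags. A toy version of that parent delegation:

#include <stdio.h>

struct irq_chip {
	int (*set_type)(int irq, unsigned int type);
};

static int parent_set_type(int irq, unsigned int type)
{
	printf("parent configures irq %d, type %u\n", irq, type);
	return 0;
}

static struct irq_chip parent_chip = { .set_type = parent_set_type };

/* like irq_chip_set_type_parent(): defer to the next chip up */
static int crossbar_set_type(int irq, unsigned int type)
{
	return parent_chip.set_type(irq, type);
}

int main(void)
{
	return crossbar_set_type(10, 1);
}
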
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 32814371b8d3..aa1b41ca40f7 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -1471,5 +1471,3 @@ module_exit(mq_exit);
1471MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 1471MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1472MODULE_LICENSE("GPL"); 1472MODULE_LICENSE("GPL");
1473MODULE_DESCRIPTION("mq cache policy"); 1473MODULE_DESCRIPTION("mq cache policy");
1474
1475MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 48a4a826ae07..200366c62231 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1789,3 +1789,5 @@ module_exit(smq_exit);
1789MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 1789MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1790MODULE_LICENSE("GPL"); 1790MODULE_LICENSE("GPL");
1791MODULE_DESCRIPTION("smq cache policy"); 1791MODULE_DESCRIPTION("smq cache policy");
1792
1793MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 48dfe3c4d6aa..6ba47cfb1443 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1293,8 +1293,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
1293 return r; 1293 return r;
1294 1294
1295 disk_super = dm_block_data(copy); 1295 disk_super = dm_block_data(copy);
1296 dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root)); 1296 dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
1297 dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root)); 1297 dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
1298 dm_sm_dec_block(pmd->metadata_sm, held_root); 1298 dm_sm_dec_block(pmd->metadata_sm, held_root);
1299 1299
1300 return dm_tm_unlock(pmd->tm, copy); 1300 return dm_tm_unlock(pmd->tm, copy);
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index bf2b80d5c470..8731b6ea026b 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
138 138
139extern struct dm_block_validator btree_node_validator; 139extern struct dm_block_validator btree_node_validator;
140 140
141/*
142 * Value type for upper levels of multi-level btrees.
143 */
144extern void init_le64_type(struct dm_transaction_manager *tm,
145 struct dm_btree_value_type *vt);
146
141#endif /* DM_BTREE_INTERNAL_H */ 147#endif /* DM_BTREE_INTERNAL_H */
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 9ca9eccd512f..4222f774cf36 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
544 return r; 544 return r;
545} 545}
546 546
547static struct dm_btree_value_type le64_type = {
548 .context = NULL,
549 .size = sizeof(__le64),
550 .inc = NULL,
551 .dec = NULL,
552 .equal = NULL
553};
554
555int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, 547int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
556 uint64_t *keys, dm_block_t *new_root) 548 uint64_t *keys, dm_block_t *new_root)
557{ 549{
@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
559 int index = 0, r = 0; 551 int index = 0, r = 0;
560 struct shadow_spine spine; 552 struct shadow_spine spine;
561 struct btree_node *n; 553 struct btree_node *n;
554 struct dm_btree_value_type le64_vt;
562 555
556 init_le64_type(info->tm, &le64_vt);
563 init_shadow_spine(&spine, info); 557 init_shadow_spine(&spine, info);
564 for (level = 0; level < info->levels; level++) { 558 for (level = 0; level < info->levels; level++) {
565 r = remove_raw(&spine, info, 559 r = remove_raw(&spine, info,
566 (level == last_level ? 560 (level == last_level ?
567 &info->value_type : &le64_type), 561 &info->value_type : &le64_vt),
568 root, keys[level], (unsigned *)&index); 562 root, keys[level], (unsigned *)&index);
569 if (r < 0) 563 if (r < 0)
570 break; 564 break;
@@ -654,11 +648,13 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
654 int index = 0, r = 0; 648 int index = 0, r = 0;
655 struct shadow_spine spine; 649 struct shadow_spine spine;
656 struct btree_node *n; 650 struct btree_node *n;
651 struct dm_btree_value_type le64_vt;
657 uint64_t k; 652 uint64_t k;
658 653
654 init_le64_type(info->tm, &le64_vt);
659 init_shadow_spine(&spine, info); 655 init_shadow_spine(&spine, info);
660 for (level = 0; level < last_level; level++) { 656 for (level = 0; level < last_level; level++) {
661 r = remove_raw(&spine, info, &le64_type, 657 r = remove_raw(&spine, info, &le64_vt,
662 root, keys[level], (unsigned *) &index); 658 root, keys[level], (unsigned *) &index);
663 if (r < 0) 659 if (r < 0)
664 goto out; 660 goto out;
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index 1b5e13ec7f96..0dee514ba4c5 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
249{ 249{
250 return s->root; 250 return s->root;
251} 251}
252
253static void le64_inc(void *context, const void *value_le)
254{
255 struct dm_transaction_manager *tm = context;
256 __le64 v_le;
257
258 memcpy(&v_le, value_le, sizeof(v_le));
259 dm_tm_inc(tm, le64_to_cpu(v_le));
260}
261
262static void le64_dec(void *context, const void *value_le)
263{
264 struct dm_transaction_manager *tm = context;
265 __le64 v_le;
266
267 memcpy(&v_le, value_le, sizeof(v_le));
268 dm_tm_dec(tm, le64_to_cpu(v_le));
269}
270
271static int le64_equal(void *context, const void *value1_le, const void *value2_le)
272{
273 __le64 v1_le, v2_le;
274
275 memcpy(&v1_le, value1_le, sizeof(v1_le));
276 memcpy(&v2_le, value2_le, sizeof(v2_le));
277 return v1_le == v2_le;
278}
279
280void init_le64_type(struct dm_transaction_manager *tm,
281 struct dm_btree_value_type *vt)
282{
283 vt->context = tm;
284 vt->size = sizeof(__le64);
285 vt->inc = le64_inc;
286 vt->dec = le64_dec;
287 vt->equal = le64_equal;
288}
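
init_le64_type() gives the btree's internal levels a value type whose inc/dec callbacks forward to the transaction manager, so child-block reference counts stay correct when nodes are shadowed or deleted; the context pointer carries the transaction manager into the callbacks, and memcpy() avoids unaligned loads. A compressed sketch with toy types:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct tm_toy { int refs[16]; };

struct value_type {
	void *context;
	size_t size;
	void (*inc)(void *context, const void *value_le);
};

static void le64_inc(void *context, const void *value_le)
{
	struct tm_toy *tm = context;
	uint64_t v;

	memcpy(&v, value_le, sizeof(v)); /* value may be unaligned */
	tm->refs[v & 15]++;              /* toy bound on block numbers */
}

void init_le64_type_like(struct tm_toy *tm, struct value_type *vt)
{
	vt->context = tm; /* transaction manager rides in the context */
	vt->size = sizeof(uint64_t);
	vt->inc = le64_inc;
}
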
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index fdd3793e22f9..c7726cebc495 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
667 struct btree_node *n; 667 struct btree_node *n;
668 struct dm_btree_value_type le64_type; 668 struct dm_btree_value_type le64_type;
669 669
670 le64_type.context = NULL; 670 init_le64_type(info->tm, &le64_type);
671 le64_type.size = sizeof(__le64);
672 le64_type.inc = NULL;
673 le64_type.dec = NULL;
674 le64_type.equal = NULL;
675
676 init_shadow_spine(&spine, info); 671 init_shadow_spine(&spine, info);
677 672
678 for (level = 0; level < (info->levels - 1); level++) { 673 for (level = 0; level < (info->levels - 1); level++) {
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 0d35f5850ff1..5ab90f36a6a6 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -240,7 +240,7 @@ config DVB_SI21XX
240 240
241config DVB_TS2020 241config DVB_TS2020
242 tristate "Montage Tehnology TS2020 based tuners" 242 tristate "Montage Tehnology TS2020 based tuners"
243 depends on DVB_CORE 243 depends on DVB_CORE && I2C
244 select REGMAP_I2C 244 select REGMAP_I2C
245 default m if !MEDIA_SUBDRV_AUTOSELECT 245 default m if !MEDIA_SUBDRV_AUTOSELECT
246 help 246 help
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index 3be1b2c3c386..6a1c0089bb62 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -2,6 +2,7 @@ config VIDEO_COBALT
2 tristate "Cisco Cobalt support" 2 tristate "Cisco Cobalt support"
3 depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER 3 depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
4 depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB 4 depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB
5 depends on SND
5 select I2C_ALGOBIT 6 select I2C_ALGOBIT
6 select VIDEO_ADV7604 7 select VIDEO_ADV7604
7 select VIDEO_ADV7511 8 select VIDEO_ADV7511
diff --git a/drivers/media/pci/cobalt/cobalt-irq.c b/drivers/media/pci/cobalt/cobalt-irq.c
index dd4bff9cf339..d1f5898d11ba 100644
--- a/drivers/media/pci/cobalt/cobalt-irq.c
+++ b/drivers/media/pci/cobalt/cobalt-irq.c
@@ -139,7 +139,7 @@ done:
139 also know about dropped frames. */ 139 also know about dropped frames. */
140 cb->vb.v4l2_buf.sequence = s->sequence++; 140 cb->vb.v4l2_buf.sequence = s->sequence++;
141 vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ? 141 vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
142 VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE); 142 VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
143} 143}
144 144
145irqreturn_t cobalt_irq_handler(int irq, void *dev_id) 145irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
diff --git a/drivers/media/pci/mantis/mantis_dma.c b/drivers/media/pci/mantis/mantis_dma.c
index 1d59c7e039f7..87990ece5848 100644
--- a/drivers/media/pci/mantis/mantis_dma.c
+++ b/drivers/media/pci/mantis/mantis_dma.c
@@ -130,10 +130,11 @@ err:
130 130
131int mantis_dma_init(struct mantis_pci *mantis) 131int mantis_dma_init(struct mantis_pci *mantis)
132{ 132{
133 int err = 0; 133 int err;
134 134
135 dprintk(MANTIS_DEBUG, 1, "Mantis DMA init"); 135 dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
136 if (mantis_alloc_buffers(mantis) < 0) { 136 err = mantis_alloc_buffers(mantis);
137 if (err < 0) {
137 dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer"); 138 dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
138 139
139 /* Stop RISC Engine */ 140 /* Stop RISC Engine */
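
The mantis hunk fixes a swallowed error: err was initialized to 0 and never assigned, so the function could report success even after buffer allocation failed. The shape of the bug and its fix:

#include <stdio.h>

static int alloc_buffers(void)
{
	return -12; /* say the allocation failed with -ENOMEM */
}

int dma_init(void)
{
	int err;

	err = alloc_buffers(); /* was: if (alloc_buffers() < 0) with err = 0 */
	if (err < 0) {
		fprintf(stderr, "error allocating DMA buffer\n");
		return err;    /* previously this path returned 0 */
	}

	return 0;
}

int main(void)
{
	return dma_init() ? 1 : 0;
}
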
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 8939ebd74391..84fa6e9b59a1 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -184,125 +184,9 @@ out:
184 return -EINVAL; 184 return -EINVAL;
185} 185}
186 186
187static struct ir_raw_timings_manchester ir_rc5_timings = {
188 .leader = RC5_UNIT,
189 .pulse_space_start = 0,
190 .clock = RC5_UNIT,
191 .trailer_space = RC5_UNIT * 10,
192};
193
194static struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
195 {
196 .leader = RC5_UNIT,
197 .pulse_space_start = 0,
198 .clock = RC5_UNIT,
199 .trailer_space = RC5X_SPACE,
200 },
201 {
202 .clock = RC5_UNIT,
203 .trailer_space = RC5_UNIT * 10,
204 },
205};
206
207static struct ir_raw_timings_manchester ir_rc5_sz_timings = {
208 .leader = RC5_UNIT,
209 .pulse_space_start = 0,
210 .clock = RC5_UNIT,
211 .trailer_space = RC5_UNIT * 10,
212};
213
214static int ir_rc5_validate_filter(const struct rc_scancode_filter *scancode,
215 unsigned int important_bits)
216{
217 /* all important bits of scancode should be set in mask */
218 if (~scancode->mask & important_bits)
219 return -EINVAL;
220 /* extra bits in mask should be zero in data */
221 if (scancode->mask & scancode->data & ~important_bits)
222 return -EINVAL;
223 return 0;
224}
225
226/**
227 * ir_rc5_encode() - Encode a scancode as a stream of raw events
228 *
229 * @protocols: allowed protocols
230 * @scancode: scancode filter describing scancode (helps distinguish between
231 * protocol subtypes when scancode is ambiguous)
232 * @events: array of raw ir events to write into
233 * @max: maximum size of @events
234 *
235 * Returns: The number of events written.
236 * -ENOBUFS if there isn't enough space in the array to fit the
237 * encoding. In this case all @max events will have been written.
238 * -EINVAL if the scancode is ambiguous or invalid.
239 */
240static int ir_rc5_encode(u64 protocols,
241 const struct rc_scancode_filter *scancode,
242 struct ir_raw_event *events, unsigned int max)
243{
244 int ret;
245 struct ir_raw_event *e = events;
246 unsigned int data, xdata, command, commandx, system;
247
248 /* Detect protocol and convert scancode to raw data */
249 if (protocols & RC_BIT_RC5 &&
250 !ir_rc5_validate_filter(scancode, 0x1f7f)) {
251 /* decode scancode */
252 command = (scancode->data & 0x003f) >> 0;
253 commandx = (scancode->data & 0x0040) >> 6;
254 system = (scancode->data & 0x1f00) >> 8;
255 /* encode data */
256 data = !commandx << 12 | system << 6 | command;
257
258 /* Modulate the data */
259 ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings, RC5_NBITS,
260 data);
261 if (ret < 0)
262 return ret;
263 } else if (protocols & RC_BIT_RC5X &&
264 !ir_rc5_validate_filter(scancode, 0x1f7f3f)) {
265 /* decode scancode */
266 xdata = (scancode->data & 0x00003f) >> 0;
267 command = (scancode->data & 0x003f00) >> 8;
268 commandx = (scancode->data & 0x004000) >> 14;
269 system = (scancode->data & 0x1f0000) >> 16;
270 /* commandx and system overlap, bits must match when encoded */
271 if (commandx == (system & 0x1))
272 return -EINVAL;
273 /* encode data */
274 data = 1 << 18 | system << 12 | command << 6 | xdata;
275
276 /* Modulate the data */
277 ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
278 CHECK_RC5X_NBITS,
279 data >> (RC5X_NBITS-CHECK_RC5X_NBITS));
280 if (ret < 0)
281 return ret;
282 ret = ir_raw_gen_manchester(&e, max - (e - events),
283 &ir_rc5x_timings[1],
284 RC5X_NBITS - CHECK_RC5X_NBITS,
285 data);
286 if (ret < 0)
287 return ret;
288 } else if (protocols & RC_BIT_RC5_SZ &&
289 !ir_rc5_validate_filter(scancode, 0x2fff)) {
290 /* RC5-SZ scancode is raw enough for Manchester as it is */
291 ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
292 RC5_SZ_NBITS, scancode->data & 0x2fff);
293 if (ret < 0)
294 return ret;
295 } else {
296 return -EINVAL;
297 }
298
299 return e - events;
300}
301
302static struct ir_raw_handler rc5_handler = { 187static struct ir_raw_handler rc5_handler = {
303 .protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ, 188 .protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ,
304 .decode = ir_rc5_decode, 189 .decode = ir_rc5_decode,
305 .encode = ir_rc5_encode,
306}; 190};
307 191
308static int __init ir_rc5_decode_init(void) 192static int __init ir_rc5_decode_init(void)
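The removed ir_rc5_validate_filter() above encodes a reusable rule for scancode filters: every protocol-significant bit must be covered by the mask, and data bits outside the significant set must be zero. A standalone model of the two checks (hypothetical struct and names, not rc-core's):

#include <errno.h>
#include <stdint.h>

struct filter { uint32_t data, mask; };

static int validate_filter(const struct filter *f, uint32_t important_bits)
{
	/* all important bits of the scancode must be set in the mask */
	if (~f->mask & important_bits)
		return -EINVAL;
	/* data bits outside the important set must be zero */
	if (f->mask & f->data & ~important_bits)
		return -EINVAL;
	return 0;
}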
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index f9c70baf6e0c..d16bc67af732 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -291,133 +291,11 @@ out:
291 return -EINVAL; 291 return -EINVAL;
292} 292}
293 293
294static struct ir_raw_timings_manchester ir_rc6_timings[4] = {
295 {
296 .leader = RC6_PREFIX_PULSE,
297 .pulse_space_start = 0,
298 .clock = RC6_UNIT,
299 .invert = 1,
300 .trailer_space = RC6_PREFIX_SPACE,
301 },
302 {
303 .clock = RC6_UNIT,
304 .invert = 1,
305 },
306 {
307 .clock = RC6_UNIT * 2,
308 .invert = 1,
309 },
310 {
311 .clock = RC6_UNIT,
312 .invert = 1,
313 .trailer_space = RC6_SUFFIX_SPACE,
314 },
315};
316
317static int ir_rc6_validate_filter(const struct rc_scancode_filter *scancode,
318 unsigned int important_bits)
319{
320 /* all important bits of scancode should be set in mask */
321 if (~scancode->mask & important_bits)
322 return -EINVAL;
323 /* extra bits in mask should be zero in data */
324 if (scancode->mask & scancode->data & ~important_bits)
325 return -EINVAL;
326 return 0;
327}
328
329/**
330 * ir_rc6_encode() - Encode a scancode as a stream of raw events
331 *
332 * @protocols: allowed protocols
333 * @scancode: scancode filter describing scancode (helps distinguish between
334 * protocol subtypes when scancode is ambiguous)
335 * @events: array of raw ir events to write into
336 * @max: maximum size of @events
337 *
338 * Returns: The number of events written.
339 * -ENOBUFS if there isn't enough space in the array to fit the
340 * encoding. In this case all @max events will have been written.
341 * -EINVAL if the scancode is ambiguous or invalid.
342 */
343static int ir_rc6_encode(u64 protocols,
344 const struct rc_scancode_filter *scancode,
345 struct ir_raw_event *events, unsigned int max)
346{
347 int ret;
348 struct ir_raw_event *e = events;
349
350 if (protocols & RC_BIT_RC6_0 &&
351 !ir_rc6_validate_filter(scancode, 0xffff)) {
352
353 /* Modulate the preamble */
354 ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
355 if (ret < 0)
356 return ret;
357
358 /* Modulate the header (Start Bit & Mode-0) */
359 ret = ir_raw_gen_manchester(&e, max - (e - events),
360 &ir_rc6_timings[1],
361 RC6_HEADER_NBITS, (1 << 3));
362 if (ret < 0)
363 return ret;
364
365 /* Modulate Trailer Bit */
366 ret = ir_raw_gen_manchester(&e, max - (e - events),
367 &ir_rc6_timings[2], 1, 0);
368 if (ret < 0)
369 return ret;
370
371 /* Modulate rest of the data */
372 ret = ir_raw_gen_manchester(&e, max - (e - events),
373 &ir_rc6_timings[3], RC6_0_NBITS,
374 scancode->data);
375 if (ret < 0)
376 return ret;
377
378 } else if (protocols & (RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
379 RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE) &&
380 !ir_rc6_validate_filter(scancode, 0x8fffffff)) {
381
382 /* Modulate the preamble */
383 ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
384 if (ret < 0)
385 return ret;
386
387		/* Modulate the header (Start Bit & Header-version 6) */
388 ret = ir_raw_gen_manchester(&e, max - (e - events),
389 &ir_rc6_timings[1],
390 RC6_HEADER_NBITS, (1 << 3 | 6));
391 if (ret < 0)
392 return ret;
393
394 /* Modulate Trailer Bit */
395 ret = ir_raw_gen_manchester(&e, max - (e - events),
396 &ir_rc6_timings[2], 1, 0);
397 if (ret < 0)
398 return ret;
399
400 /* Modulate rest of the data */
401 ret = ir_raw_gen_manchester(&e, max - (e - events),
402 &ir_rc6_timings[3],
403 fls(scancode->mask),
404 scancode->data);
405 if (ret < 0)
406 return ret;
407
408 } else {
409 return -EINVAL;
410 }
411
412 return e - events;
413}
414
415static struct ir_raw_handler rc6_handler = { 294static struct ir_raw_handler rc6_handler = {
416 .protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | 295 .protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
417 RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 | 296 RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
418 RC_BIT_RC6_MCE, 297 RC_BIT_RC6_MCE,
419 .decode = ir_rc6_decode, 298 .decode = ir_rc6_decode,
420 .encode = ir_rc6_encode,
421}; 299};
422 300
423static int __init ir_rc6_decode_init(void) 301static int __init ir_rc6_decode_init(void)
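The removed RC-6 encoder above chains several ir_raw_gen_manchester() calls (preamble, header, trailer bit, payload) and tracks the remaining buffer capacity as max - (e - events), because each call advances the shared event cursor. A standalone sketch of that accounting convention, where gen_stage() merely consumes n slots in place of real Manchester generation:

#include <errno.h>

struct ev { unsigned int duration; };

static int gen_stage(struct ev **e, unsigned int max, unsigned int n)
{
	if (n > max)
		return -ENOBUFS;	/* caller sees a full buffer */
	*e += n;
	return 0;
}

static int encode(struct ev *events, unsigned int max)
{
	struct ev *e = events;
	int ret;

	ret = gen_stage(&e, max, 2);			/* preamble */
	if (ret < 0)
		return ret;
	ret = gen_stage(&e, max - (e - events), 8);	/* header */
	if (ret < 0)
		return ret;
	return e - events;				/* events written */
}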
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index baeb5971fd52..85af7a869167 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -526,130 +526,6 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
526 return 0; 526 return 0;
527} 527}
528 528
529static int nvt_write_wakeup_codes(struct rc_dev *dev,
530 const u8 *wakeup_sample_buf, int count)
531{
532 int i = 0;
533 u8 reg, reg_learn_mode;
534 unsigned long flags;
535 struct nvt_dev *nvt = dev->priv;
536
537 nvt_dbg_wake("writing wakeup samples");
538
539 reg = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
540 reg_learn_mode = reg & ~CIR_WAKE_IRCON_MODE0;
541 reg_learn_mode |= CIR_WAKE_IRCON_MODE1;
542
543 /* Lock the learn area to prevent racing with wake-isr */
544 spin_lock_irqsave(&nvt->nvt_lock, flags);
545
546 /* Enable fifo writes */
547 nvt_cir_wake_reg_write(nvt, reg_learn_mode, CIR_WAKE_IRCON);
548
549 /* Clear cir wake rx fifo */
550 nvt_clear_cir_wake_fifo(nvt);
551
552 if (count > WAKE_FIFO_LEN) {
553 nvt_dbg_wake("HW FIFO too small for all wake samples");
554 count = WAKE_FIFO_LEN;
555 }
556
557 if (count)
558 pr_info("Wake samples (%d) =", count);
559 else
560 pr_info("Wake sample fifo cleared");
561
562 /* Write wake samples to fifo */
563 for (i = 0; i < count; i++) {
564 pr_cont(" %02x", wakeup_sample_buf[i]);
565 nvt_cir_wake_reg_write(nvt, wakeup_sample_buf[i],
566 CIR_WAKE_WR_FIFO_DATA);
567 }
568 pr_cont("\n");
569
570 /* Switch cir to wakeup mode and disable fifo writing */
571 nvt_cir_wake_reg_write(nvt, reg, CIR_WAKE_IRCON);
572
573 /* Set number of bytes needed for wake */
574 nvt_cir_wake_reg_write(nvt, count ? count :
575 CIR_WAKE_FIFO_CMP_BYTES,
576 CIR_WAKE_FIFO_CMP_DEEP);
577
578 spin_unlock_irqrestore(&nvt->nvt_lock, flags);
579
580 return 0;
581}
582
583static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
584 struct rc_scancode_filter *sc_filter)
585{
586 u8 *reg_buf;
587 u8 buf_val;
588 int i, ret, count;
589 unsigned int val;
590 struct ir_raw_event *raw;
591 bool complete;
592
593 /* Require both mask and data to be set before actually committing */
594 if (!sc_filter->mask || !sc_filter->data)
595 return 0;
596
597 raw = kmalloc_array(WAKE_FIFO_LEN, sizeof(*raw), GFP_KERNEL);
598 if (!raw)
599 return -ENOMEM;
600
601 ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
602 raw, WAKE_FIFO_LEN);
603 complete = (ret != -ENOBUFS);
604 if (!complete)
605 ret = WAKE_FIFO_LEN;
606 else if (ret < 0)
607 goto out_raw;
608
609 reg_buf = kmalloc_array(WAKE_FIFO_LEN, sizeof(*reg_buf), GFP_KERNEL);
610 if (!reg_buf) {
611 ret = -ENOMEM;
612 goto out_raw;
613 }
614
615 /* Inspect the ir samples */
616 for (i = 0, count = 0; i < ret && count < WAKE_FIFO_LEN; ++i) {
617 val = NS_TO_US((raw[i]).duration) / SAMPLE_PERIOD;
618
619 /* Split too large values into several smaller ones */
620 while (val > 0 && count < WAKE_FIFO_LEN) {
621
622 /* Skip last value for better comparison tolerance */
623 if (complete && i == ret - 1 && val < BUF_LEN_MASK)
624 break;
625
626 /* Clamp values to BUF_LEN_MASK at most */
627 buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
628
629 reg_buf[count] = buf_val;
630 val -= buf_val;
631 if ((raw[i]).pulse)
632 reg_buf[count] |= BUF_PULSE_BIT;
633 count++;
634 }
635 }
636
637 ret = nvt_write_wakeup_codes(dev, reg_buf, count);
638
639 kfree(reg_buf);
640out_raw:
641 kfree(raw);
642
643 return ret;
644}
645
646/* Dummy implementation. nuvoton is agnostic to the protocol used */
647static int nvt_ir_raw_change_wakeup_protocol(struct rc_dev *dev,
648 u64 *rc_type)
649{
650 return 0;
651}
652
653/* 529/*
654 * nvt_tx_ir 530 * nvt_tx_ir
655 * 531 *
@@ -1167,14 +1043,11 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
1167 /* Set up the rc device */ 1043 /* Set up the rc device */
1168 rdev->priv = nvt; 1044 rdev->priv = nvt;
1169 rdev->driver_type = RC_DRIVER_IR_RAW; 1045 rdev->driver_type = RC_DRIVER_IR_RAW;
1170 rdev->encode_wakeup = true;
1171 rdev->allowed_protocols = RC_BIT_ALL; 1046 rdev->allowed_protocols = RC_BIT_ALL;
1172 rdev->open = nvt_open; 1047 rdev->open = nvt_open;
1173 rdev->close = nvt_close; 1048 rdev->close = nvt_close;
1174 rdev->tx_ir = nvt_tx_ir; 1049 rdev->tx_ir = nvt_tx_ir;
1175 rdev->s_tx_carrier = nvt_set_tx_carrier; 1050 rdev->s_tx_carrier = nvt_set_tx_carrier;
1176 rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
1177 rdev->change_wakeup_protocol = nvt_ir_raw_change_wakeup_protocol;
1178 rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver"; 1051 rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
1179 rdev->input_phys = "nuvoton/cir0"; 1052 rdev->input_phys = "nuvoton/cir0";
1180 rdev->input_id.bustype = BUS_HOST; 1053 rdev->input_id.bustype = BUS_HOST;
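The removed nuvoton wakeup path above packs raw IR durations into the wake FIFO by splitting each duration into chunks no larger than the register's length field, tagging each chunk with a pulse bit. A standalone model of the packing loop; LEN_MASK and PULSE_BIT only mirror BUF_LEN_MASK and BUF_PULSE_BIT in spirit, the values here are illustrative:

#include <stddef.h>
#include <stdint.h>

#define LEN_MASK	0x7f	/* illustrative 7-bit length field */
#define PULSE_BIT	0x80	/* illustrative pulse/space marker */

static size_t pack_samples(uint8_t *buf, size_t cap, unsigned int val,
			   int pulse)
{
	size_t count = 0;

	while (val > 0 && count < cap) {
		/* clamp each chunk to the width of the length field */
		uint8_t chunk = (val > LEN_MASK) ? LEN_MASK : val;

		buf[count++] = chunk | (pulse ? PULSE_BIT : 0);
		val -= chunk;
	}
	return count;
}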
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 9d0e161c2a88..e1cf23c3875b 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -63,7 +63,6 @@ static int debug;
63 */ 63 */
64#define TX_BUF_LEN 256 64#define TX_BUF_LEN 256
65#define RX_BUF_LEN 32 65#define RX_BUF_LEN 32
66#define WAKE_FIFO_LEN 67
67 66
68struct nvt_dev { 67struct nvt_dev {
69 struct pnp_dev *pdev; 68 struct pnp_dev *pdev;
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 4b994aa2f2a7..b68d4f762734 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -25,8 +25,6 @@ struct ir_raw_handler {
25 25
26 u64 protocols; /* which are handled by this handler */ 26 u64 protocols; /* which are handled by this handler */
27 int (*decode)(struct rc_dev *dev, struct ir_raw_event event); 27 int (*decode)(struct rc_dev *dev, struct ir_raw_event event);
28 int (*encode)(u64 protocols, const struct rc_scancode_filter *scancode,
29 struct ir_raw_event *events, unsigned int max);
30 28
31 /* These two should only be used by the lirc decoder */ 29 /* These two should only be used by the lirc decoder */
32 int (*raw_register)(struct rc_dev *dev); 30 int (*raw_register)(struct rc_dev *dev);
@@ -152,44 +150,10 @@ static inline bool is_timing_event(struct ir_raw_event ev)
152#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000) 150#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000)
153#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space") 151#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
154 152
155/* functions for IR encoders */
156
157static inline void init_ir_raw_event_duration(struct ir_raw_event *ev,
158 unsigned int pulse,
159 u32 duration)
160{
161 init_ir_raw_event(ev);
162 ev->duration = duration;
163 ev->pulse = pulse;
164}
165
166/**
167 * struct ir_raw_timings_manchester - Manchester coding timings
168 * @leader: duration of leader pulse (if any) 0 if continuing
169 * existing signal (see @pulse_space_start)
170 * @pulse_space_start: 1 for starting with pulse (0 for starting with space)
171 * @clock: duration of each pulse/space in ns
172 * @invert: if set clock logic is inverted
173 * (0 = space + pulse, 1 = pulse + space)
174 * @trailer_space: duration of trailer space in ns
175 */
176struct ir_raw_timings_manchester {
177 unsigned int leader;
178 unsigned int pulse_space_start:1;
179 unsigned int clock;
180 unsigned int invert:1;
181 unsigned int trailer_space;
182};
183
184int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
185 const struct ir_raw_timings_manchester *timings,
186 unsigned int n, unsigned int data);
187
188/* 153/*
189 * Routines from rc-raw.c to be used internally and by decoders 154 * Routines from rc-raw.c to be used internally and by decoders
190 */ 155 */
191u64 ir_raw_get_allowed_protocols(void); 156u64 ir_raw_get_allowed_protocols(void);
192u64 ir_raw_get_encode_protocols(void);
193int ir_raw_event_register(struct rc_dev *dev); 157int ir_raw_event_register(struct rc_dev *dev);
194void ir_raw_event_unregister(struct rc_dev *dev); 158void ir_raw_event_unregister(struct rc_dev *dev);
195int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler); 159int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index b9e4645c731c..b732ac6a26d8 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -30,7 +30,6 @@ static LIST_HEAD(ir_raw_client_list);
30static DEFINE_MUTEX(ir_raw_handler_lock); 30static DEFINE_MUTEX(ir_raw_handler_lock);
31static LIST_HEAD(ir_raw_handler_list); 31static LIST_HEAD(ir_raw_handler_list);
32static u64 available_protocols; 32static u64 available_protocols;
33static u64 encode_protocols;
34 33
35static int ir_raw_event_thread(void *data) 34static int ir_raw_event_thread(void *data)
36{ 35{
@@ -241,146 +240,12 @@ ir_raw_get_allowed_protocols(void)
241 return protocols; 240 return protocols;
242} 241}
243 242
244/* used internally by the sysfs interface */
245u64
246ir_raw_get_encode_protocols(void)
247{
248 u64 protocols;
249
250 mutex_lock(&ir_raw_handler_lock);
251 protocols = encode_protocols;
252 mutex_unlock(&ir_raw_handler_lock);
253 return protocols;
254}
255
256static int change_protocol(struct rc_dev *dev, u64 *rc_type) 243static int change_protocol(struct rc_dev *dev, u64 *rc_type)
257{ 244{
258 /* the caller will update dev->enabled_protocols */ 245 /* the caller will update dev->enabled_protocols */
259 return 0; 246 return 0;
260} 247}
261 248
262/**
263 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
264 * @ev: Pointer to pointer to next free event. *@ev is incremented for
265 * each raw event filled.
266 * @max: Maximum number of raw events to fill.
267 * @timings: Manchester modulation timings.
268 * @n: Number of bits of data.
269 * @data: Data bits to encode.
270 *
271 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
272 * modulation with the timing characteristics described by @timings, writing up
273 * to @max raw IR events using the *@ev pointer.
274 *
275 * Returns: 0 on success.
276 * -ENOBUFS if there isn't enough space in the array to fit the
277 * full encoded data. In this case all @max events will have been
278 * written.
279 */
280int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
281 const struct ir_raw_timings_manchester *timings,
282 unsigned int n, unsigned int data)
283{
284 bool need_pulse;
285 unsigned int i;
286 int ret = -ENOBUFS;
287
288 i = 1 << (n - 1);
289
290 if (timings->leader) {
291 if (!max--)
292 return ret;
293 if (timings->pulse_space_start) {
294 init_ir_raw_event_duration((*ev)++, 1, timings->leader);
295
296 if (!max--)
297 return ret;
298 init_ir_raw_event_duration((*ev), 0, timings->leader);
299 } else {
300 init_ir_raw_event_duration((*ev), 1, timings->leader);
301 }
302 i >>= 1;
303 } else {
304 /* continue existing signal */
305 --(*ev);
306 }
307 /* from here on *ev will point to the last event rather than the next */
308
309 while (n && i > 0) {
310 need_pulse = !(data & i);
311 if (timings->invert)
312 need_pulse = !need_pulse;
313 if (need_pulse == !!(*ev)->pulse) {
314 (*ev)->duration += timings->clock;
315 } else {
316 if (!max--)
317 goto nobufs;
318 init_ir_raw_event_duration(++(*ev), need_pulse,
319 timings->clock);
320 }
321
322 if (!max--)
323 goto nobufs;
324 init_ir_raw_event_duration(++(*ev), !need_pulse,
325 timings->clock);
326 i >>= 1;
327 }
328
329 if (timings->trailer_space) {
330 if (!(*ev)->pulse)
331 (*ev)->duration += timings->trailer_space;
332 else if (!max--)
333 goto nobufs;
334 else
335 init_ir_raw_event_duration(++(*ev), 0,
336 timings->trailer_space);
337 }
338
339 ret = 0;
340nobufs:
341 /* point to the next event rather than last event before returning */
342 ++(*ev);
343 return ret;
344}
345EXPORT_SYMBOL(ir_raw_gen_manchester);
346
347/**
348 * ir_raw_encode_scancode() - Encode a scancode as raw events
349 *
350 * @protocols: permitted protocols
351 * @scancode: scancode filter describing a single scancode
352 * @events: array of raw events to write into
353 * @max: max number of raw events
354 *
355 * Attempts to encode the scancode as raw events.
356 *
357 * Returns: The number of events written.
358 * -ENOBUFS if there isn't enough space in the array to fit the
359 * encoding. In this case all @max events will have been written.
360 * -EINVAL if the scancode is ambiguous or invalid, or if no
361 * compatible encoder was found.
362 */
363int ir_raw_encode_scancode(u64 protocols,
364 const struct rc_scancode_filter *scancode,
365 struct ir_raw_event *events, unsigned int max)
366{
367 struct ir_raw_handler *handler;
368 int ret = -EINVAL;
369
370 mutex_lock(&ir_raw_handler_lock);
371 list_for_each_entry(handler, &ir_raw_handler_list, list) {
372 if (handler->protocols & protocols && handler->encode) {
373 ret = handler->encode(protocols, scancode, events, max);
374 if (ret >= 0 || ret == -ENOBUFS)
375 break;
376 }
377 }
378 mutex_unlock(&ir_raw_handler_lock);
379
380 return ret;
381}
382EXPORT_SYMBOL(ir_raw_encode_scancode);
383
384/* 249/*
385 * Used to (un)register raw event clients 250 * Used to (un)register raw event clients
386 */ 251 */
@@ -463,8 +328,6 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
463 list_for_each_entry(raw, &ir_raw_client_list, list) 328 list_for_each_entry(raw, &ir_raw_client_list, list)
464 ir_raw_handler->raw_register(raw->dev); 329 ir_raw_handler->raw_register(raw->dev);
465 available_protocols |= ir_raw_handler->protocols; 330 available_protocols |= ir_raw_handler->protocols;
466 if (ir_raw_handler->encode)
467 encode_protocols |= ir_raw_handler->protocols;
468 mutex_unlock(&ir_raw_handler_lock); 331 mutex_unlock(&ir_raw_handler_lock);
469 332
470 return 0; 333 return 0;
@@ -481,8 +344,6 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
481 list_for_each_entry(raw, &ir_raw_client_list, list) 344 list_for_each_entry(raw, &ir_raw_client_list, list)
482 ir_raw_handler->raw_unregister(raw->dev); 345 ir_raw_handler->raw_unregister(raw->dev);
483 available_protocols &= ~ir_raw_handler->protocols; 346 available_protocols &= ~ir_raw_handler->protocols;
484 if (ir_raw_handler->encode)
485 encode_protocols &= ~ir_raw_handler->protocols;
486 mutex_unlock(&ir_raw_handler_lock); 347 mutex_unlock(&ir_raw_handler_lock);
487} 348}
488EXPORT_SYMBOL(ir_raw_handler_unregister); 349EXPORT_SYMBOL(ir_raw_handler_unregister);
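The ir_raw_gen_manchester() implementation removed above is worth restating: each data bit becomes two half-clock levels (order chosen by the bit value and the invert flag), and when a new level equals the previous event's level the two are merged into one longer event rather than emitting a redundant entry. A compilable standalone model of that core loop, without the leader/trailer handling and assuming n >= 1:

#include <stdio.h>

struct ev { int pulse; unsigned int duration; };

static unsigned int manchester(struct ev *out, unsigned int n,
			       unsigned int data, unsigned int clock,
			       int invert)
{
	unsigned int count = 0;

	for (unsigned int i = 1u << (n - 1); i; i >>= 1) {
		int level = !(data & i) ^ invert;	/* first half-bit */

		for (int half = 0; half < 2; half++, level = !level) {
			if (count && out[count - 1].pulse == level)
				out[count - 1].duration += clock; /* merge */
			else
				out[count++] = (struct ev){ level, clock };
		}
	}
	return count;
}

int main(void)
{
	struct ev buf[32];
	unsigned int c = manchester(buf, 4, 0x9, 889000, 0); /* RC-5-like ns clock */

	for (unsigned int i = 0; i < c; i++)
		printf("%s %u\n", buf[i].pulse ? "pulse" : "space",
		       buf[i].duration);
	return 0;
}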
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index d8bdf63ce985..63dace8198b0 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -26,7 +26,6 @@
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/slab.h>
30#include <media/rc-core.h> 29#include <media/rc-core.h>
31 30
32#define DRIVER_NAME "rc-loopback" 31#define DRIVER_NAME "rc-loopback"
@@ -177,39 +176,6 @@ static int loop_set_carrier_report(struct rc_dev *dev, int enable)
177 return 0; 176 return 0;
178} 177}
179 178
180static int loop_set_wakeup_filter(struct rc_dev *dev,
181 struct rc_scancode_filter *sc_filter)
182{
183 static const unsigned int max = 512;
184 struct ir_raw_event *raw;
185 int ret;
186 int i;
187
188 /* fine to disable filter */
189 if (!sc_filter->mask)
190 return 0;
191
192 /* encode the specified filter and loop it back */
193 raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
194 ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
195 raw, max);
196 /* still loop back the partial raw IR even if it's incomplete */
197 if (ret == -ENOBUFS)
198 ret = max;
199 if (ret >= 0) {
200 /* do the loopback */
201 for (i = 0; i < ret; ++i)
202 ir_raw_event_store(dev, &raw[i]);
203 ir_raw_event_handle(dev);
204
205 ret = 0;
206 }
207
208 kfree(raw);
209
210 return ret;
211}
212
213static int __init loop_init(void) 179static int __init loop_init(void)
214{ 180{
215 struct rc_dev *rc; 181 struct rc_dev *rc;
@@ -229,7 +195,6 @@ static int __init loop_init(void)
229 rc->map_name = RC_MAP_EMPTY; 195 rc->map_name = RC_MAP_EMPTY;
230 rc->priv = &loopdev; 196 rc->priv = &loopdev;
231 rc->driver_type = RC_DRIVER_IR_RAW; 197 rc->driver_type = RC_DRIVER_IR_RAW;
232 rc->encode_wakeup = true;
233 rc->allowed_protocols = RC_BIT_ALL; 198 rc->allowed_protocols = RC_BIT_ALL;
234 rc->timeout = 100 * 1000 * 1000; /* 100 ms */ 199 rc->timeout = 100 * 1000 * 1000; /* 100 ms */
235 rc->min_timeout = 1; 200 rc->min_timeout = 1;
@@ -244,7 +209,6 @@ static int __init loop_init(void)
244 rc->s_idle = loop_set_idle; 209 rc->s_idle = loop_set_idle;
245 rc->s_learning_mode = loop_set_learning_mode; 210 rc->s_learning_mode = loop_set_learning_mode;
246 rc->s_carrier_report = loop_set_carrier_report; 211 rc->s_carrier_report = loop_set_carrier_report;
247 rc->s_wakeup_filter = loop_set_wakeup_filter;
248 212
249 loopdev.txmask = RXMASK_REGULAR; 213 loopdev.txmask = RXMASK_REGULAR;
250 loopdev.txcarrier = 36000; 214 loopdev.txcarrier = 36000;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 9d015db65280..0ff388a16168 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -865,8 +865,6 @@ static ssize_t show_protocols(struct device *device,
865 } else { 865 } else {
866 enabled = dev->enabled_wakeup_protocols; 866 enabled = dev->enabled_wakeup_protocols;
867 allowed = dev->allowed_wakeup_protocols; 867 allowed = dev->allowed_wakeup_protocols;
868 if (dev->encode_wakeup && !allowed)
869 allowed = ir_raw_get_encode_protocols();
870 } 868 }
871 869
872 mutex_unlock(&dev->lock); 870 mutex_unlock(&dev->lock);
@@ -1408,16 +1406,13 @@ int rc_register_device(struct rc_dev *dev)
1408 path ? path : "N/A"); 1406 path ? path : "N/A");
1409 kfree(path); 1407 kfree(path);
1410 1408
1411 if (dev->driver_type == RC_DRIVER_IR_RAW || dev->encode_wakeup) { 1409 if (dev->driver_type == RC_DRIVER_IR_RAW) {
1412 /* Load raw decoders, if they aren't already */ 1410 /* Load raw decoders, if they aren't already */
1413 if (!raw_init) { 1411 if (!raw_init) {
1414 IR_dprintk(1, "Loading raw decoders\n"); 1412 IR_dprintk(1, "Loading raw decoders\n");
1415 ir_raw_init(); 1413 ir_raw_init();
1416 raw_init = true; 1414 raw_init = true;
1417 } 1415 }
1418 }
1419
1420 if (dev->driver_type == RC_DRIVER_IR_RAW) {
1421 /* calls ir_register_device so unlock mutex here*/ 1416 /* calls ir_register_device so unlock mutex here*/
1422 mutex_unlock(&dev->lock); 1417 mutex_unlock(&dev->lock);
1423 rc = ir_raw_event_register(dev); 1418 rc = ir_raw_event_register(dev);
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 93b315459098..a14c428f70e9 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -715,6 +715,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
715 break; 715 break;
716 case VB2_BUF_STATE_PREPARING: 716 case VB2_BUF_STATE_PREPARING:
717 case VB2_BUF_STATE_DEQUEUED: 717 case VB2_BUF_STATE_DEQUEUED:
718 case VB2_BUF_STATE_REQUEUEING:
718 /* nothing */ 719 /* nothing */
719 break; 720 break;
720 } 721 }
@@ -1182,7 +1183,8 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
1182 1183
1183 if (WARN_ON(state != VB2_BUF_STATE_DONE && 1184 if (WARN_ON(state != VB2_BUF_STATE_DONE &&
1184 state != VB2_BUF_STATE_ERROR && 1185 state != VB2_BUF_STATE_ERROR &&
1185 state != VB2_BUF_STATE_QUEUED)) 1186 state != VB2_BUF_STATE_QUEUED &&
1187 state != VB2_BUF_STATE_REQUEUEING))
1186 state = VB2_BUF_STATE_ERROR; 1188 state = VB2_BUF_STATE_ERROR;
1187 1189
1188#ifdef CONFIG_VIDEO_ADV_DEBUG 1190#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1199,22 +1201,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
1199 for (plane = 0; plane < vb->num_planes; ++plane) 1201 for (plane = 0; plane < vb->num_planes; ++plane)
1200 call_void_memop(vb, finish, vb->planes[plane].mem_priv); 1202 call_void_memop(vb, finish, vb->planes[plane].mem_priv);
1201 1203
1202 /* Add the buffer to the done buffers list */
1203 spin_lock_irqsave(&q->done_lock, flags); 1204 spin_lock_irqsave(&q->done_lock, flags);
1204 vb->state = state; 1205 if (state == VB2_BUF_STATE_QUEUED ||
1205 if (state != VB2_BUF_STATE_QUEUED) 1206 state == VB2_BUF_STATE_REQUEUEING) {
1207 vb->state = VB2_BUF_STATE_QUEUED;
1208 } else {
1209 /* Add the buffer to the done buffers list */
1206 list_add_tail(&vb->done_entry, &q->done_list); 1210 list_add_tail(&vb->done_entry, &q->done_list);
1211 vb->state = state;
1212 }
1207 atomic_dec(&q->owned_by_drv_count); 1213 atomic_dec(&q->owned_by_drv_count);
1208 spin_unlock_irqrestore(&q->done_lock, flags); 1214 spin_unlock_irqrestore(&q->done_lock, flags);
1209 1215
1210 if (state == VB2_BUF_STATE_QUEUED) { 1216 switch (state) {
1217 case VB2_BUF_STATE_QUEUED:
1218 return;
1219 case VB2_BUF_STATE_REQUEUEING:
1211 if (q->start_streaming_called) 1220 if (q->start_streaming_called)
1212 __enqueue_in_driver(vb); 1221 __enqueue_in_driver(vb);
1213 return; 1222 return;
1223 default:
1224 /* Inform any processes that may be waiting for buffers */
1225 wake_up(&q->done_wq);
1226 break;
1214 } 1227 }
1215
1216 /* Inform any processes that may be waiting for buffers */
1217 wake_up(&q->done_wq);
1218} 1228}
1219EXPORT_SYMBOL_GPL(vb2_buffer_done); 1229EXPORT_SYMBOL_GPL(vb2_buffer_done);
1220 1230
@@ -1244,19 +1254,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
1244 1254
1245static void vb2_warn_zero_bytesused(struct vb2_buffer *vb) 1255static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
1246{ 1256{
1247 static bool __check_once __read_mostly; 1257 static bool check_once;
1248 1258
1249 if (__check_once) 1259 if (check_once)
1250 return; 1260 return;
1251 1261
1252 __check_once = true; 1262 check_once = true;
1253 __WARN(); 1263 WARN_ON(1);
1254 1264
1255 pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n"); 1265 pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
1256 if (vb->vb2_queue->allow_zero_bytesused) 1266 if (vb->vb2_queue->allow_zero_bytesused)
1257 pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n"); 1267 pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
1258 else 1268 else
1259 pr_warn_once("use the actual size instead.\n"); 1269 pr_warn("use the actual size instead.\n");
1260} 1270}
1261 1271
1262/** 1272/**
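After the videobuf2-core.c change above, vb2_buffer_done() distinguishes four outcomes: DONE and ERROR land the buffer on the done list and wake waiters, QUEUED silently returns it to the driver-owned queued state, and the new REQUEUEING additionally hands it back to the driver once start_streaming() has been called. A standalone model of the final dispatch; the stubs stand in for vb2 internals:

#include <stdbool.h>
#include <stdio.h>

enum state { ST_DONE, ST_ERROR, ST_QUEUED, ST_REQUEUEING };

static void enqueue_in_driver(void)	{ puts("enqueue in driver"); }
static void wake_consumers(void)	{ puts("wake done_wq"); }

static void buffer_done(enum state s, bool start_streaming_called)
{
	switch (s) {
	case ST_QUEUED:
		return;			/* driver keeps it, no wake-up */
	case ST_REQUEUEING:
		if (start_streaming_called)
			enqueue_in_driver();
		return;
	default:
		wake_consumers();	/* DONE or ERROR */
	}
}

int main(void)
{
	buffer_done(ST_REQUEUEING, true);
	buffer_done(ST_DONE, true);
	return 0;
}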
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 3a27a84ad3ec..9426276dbe14 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2245,6 +2245,9 @@ void omap3_gpmc_save_context(void)
2245{ 2245{
2246 int i; 2246 int i;
2247 2247
2248 if (!gpmc_base)
2249 return;
2250
2248 gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG); 2251 gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
2249 gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE); 2252 gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
2250 gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL); 2253 gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
@@ -2277,6 +2280,9 @@ void omap3_gpmc_restore_context(void)
2277{ 2280{
2278 int i; 2281 int i;
2279 2282
2283 if (!gpmc_base)
2284 return;
2285
2280 gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig); 2286 gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
2281 gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable); 2287 gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
2282 gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl); 2288 gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
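The omap-gpmc guards above follow a common rule for context save/restore helpers reachable from platform PM code: bail out when probe never ran and the register base was never mapped. A trivial sketch, with base standing in for gpmc_base:

static void *base;	/* NULL until probe maps the registers */

static void save_context(void)
{
	if (!base)
		return;
	/* ... read registers through base into a context struct ... */
}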
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 379a420245ea..0f0cad8dcaed 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -115,7 +115,7 @@ config MFD_CROS_EC_I2C
115 115
116config MFD_CROS_EC_SPI 116config MFD_CROS_EC_SPI
117 tristate "ChromeOS Embedded Controller (SPI)" 117 tristate "ChromeOS Embedded Controller (SPI)"
118 depends on MFD_CROS_EC && CROS_EC_PROTO && SPI && OF 118 depends on MFD_CROS_EC && CROS_EC_PROTO && SPI
119 119
120 ---help--- 120 ---help---
121 If you say Y here, you get support for talking to the ChromeOS EC 121 If you say Y here, you get support for talking to the ChromeOS EC
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index bebf58a06a6b..a72ddb295078 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -651,7 +651,7 @@ static int arizona_runtime_suspend(struct device *dev)
651 651
652 arizona->has_fully_powered_off = true; 652 arizona->has_fully_powered_off = true;
653 653
654 disable_irq(arizona->irq); 654 disable_irq_nosync(arizona->irq);
655 arizona_enable_reset(arizona); 655 arizona_enable_reset(arizona);
656 regulator_bulk_disable(arizona->num_core_supplies, 656 regulator_bulk_disable(arizona->num_core_supplies,
657 arizona->core_supplies); 657 arizona->core_supplies);
@@ -1141,10 +1141,6 @@ int arizona_dev_init(struct arizona *arizona)
1141 arizona->pdata.gpio_defaults[i]); 1141 arizona->pdata.gpio_defaults[i]);
1142 } 1142 }
1143 1143
1144 pm_runtime_set_autosuspend_delay(arizona->dev, 100);
1145 pm_runtime_use_autosuspend(arizona->dev);
1146 pm_runtime_enable(arizona->dev);
1147
1148 /* Chip default */ 1144 /* Chip default */
1149 if (!arizona->pdata.clk32k_src) 1145 if (!arizona->pdata.clk32k_src)
1150 arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2; 1146 arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
@@ -1245,11 +1241,17 @@ int arizona_dev_init(struct arizona *arizona)
1245 arizona->pdata.spk_fmt[i]); 1241 arizona->pdata.spk_fmt[i]);
1246 } 1242 }
1247 1243
1244 pm_runtime_set_active(arizona->dev);
1245 pm_runtime_enable(arizona->dev);
1246
1248 /* Set up for interrupts */ 1247 /* Set up for interrupts */
1249 ret = arizona_irq_init(arizona); 1248 ret = arizona_irq_init(arizona);
1250 if (ret != 0) 1249 if (ret != 0)
1251 goto err_reset; 1250 goto err_reset;
1252 1251
1252 pm_runtime_set_autosuspend_delay(arizona->dev, 100);
1253 pm_runtime_use_autosuspend(arizona->dev);
1254
1253 arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error", 1255 arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
1254 arizona_clkgen_err, arizona); 1256 arizona_clkgen_err, arizona);
1255 arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked", 1257 arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
@@ -1278,10 +1280,6 @@ int arizona_dev_init(struct arizona *arizona)
1278 goto err_irq; 1280 goto err_irq;
1279 } 1281 }
1280 1282
1281#ifdef CONFIG_PM
1282 regulator_disable(arizona->dcvdd);
1283#endif
1284
1285 return 0; 1283 return 0;
1286 1284
1287err_irq: 1285err_irq:
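The arizona reordering above follows the usual probe-time runtime-PM sequence: mark the device active before enabling runtime PM (so the core does not suspend a chip that is already powered), and opt into autosuspend only once interrupt handling exists. A kernel-context sketch of that ordering, not the driver's full probe path:

#include <linux/pm_runtime.h>

static int example_dev_init(struct device *dev)
{
	pm_runtime_set_active(dev);	/* hardware is already up */
	pm_runtime_enable(dev);

	/* ... set up IRQs; on failure, pm_runtime_disable(dev) ... */

	pm_runtime_set_autosuspend_delay(dev, 100);	/* ms */
	pm_runtime_use_autosuspend(dev);
	return 0;
}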
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e1ccefce9a9d..a98dd4f1b0e3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -786,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
786 slave ? slave->dev->name : "NULL"); 786 slave ? slave->dev->name : "NULL");
787 787
788 if (!slave || !bond->send_peer_notif || 788 if (!slave || !bond->send_peer_notif ||
789 !netif_carrier_ok(bond->dev) ||
789 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) 790 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
790 return false; 791 return false;
791 792
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 2d1ce3c5d0dd..753887d02b46 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1763,16 +1763,9 @@ vortex_open(struct net_device *dev)
1763 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 1763 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1764 } 1764 }
1765 if (i != RX_RING_SIZE) { 1765 if (i != RX_RING_SIZE) {
1766 int j;
1767 pr_emerg("%s: no memory for rx ring\n", dev->name); 1766 pr_emerg("%s: no memory for rx ring\n", dev->name);
1768 for (j = 0; j < i; j++) {
1769 if (vp->rx_skbuff[j]) {
1770 dev_kfree_skb(vp->rx_skbuff[j]);
1771 vp->rx_skbuff[j] = NULL;
1772 }
1773 }
1774 retval = -ENOMEM; 1767 retval = -ENOMEM;
1775 goto err_free_irq; 1768 goto err_free_skb;
1776 } 1769 }
1777 /* Wrap the ring. */ 1770 /* Wrap the ring. */
1778 vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); 1771 vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
@@ -1782,7 +1775,13 @@ vortex_open(struct net_device *dev)
1782 if (!retval) 1775 if (!retval)
1783 goto out; 1776 goto out;
1784 1777
1785err_free_irq: 1778err_free_skb:
1779 for (i = 0; i < RX_RING_SIZE; i++) {
1780 if (vp->rx_skbuff[i]) {
1781 dev_kfree_skb(vp->rx_skbuff[i]);
1782 vp->rx_skbuff[i] = NULL;
1783 }
1784 }
1786 free_irq(dev->irq, dev); 1785 free_irq(dev->irq, dev);
1787err: 1786err:
1788 if (vortex_debug > 1) 1787 if (vortex_debug > 1)
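The 3c59x rework above converges on the usual single-label unwind: one cleanup loop frees every ring slot (NULL-safe), so the allocation-failure path and later failures share it instead of each keeping a partial copy. A standalone sketch, with malloc()/free() standing in for skb allocation and dev_kfree_skb():

#include <stdlib.h>

#define RING_SIZE 8

static void *ring[RING_SIZE];

static int open_rings(void)
{
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		ring[i] = malloc(64);
		if (!ring[i])
			goto err_free;	/* shared unwind path */
	}
	return 0;

err_free:
	for (i = 0; i < RING_SIZE; i++) {
		free(ring[i]);		/* free(NULL) is a no-op */
		ring[i] = NULL;
	}
	return -1;
}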
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a90d7364334f..f7fbdc9d1325 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -262,9 +262,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
262 if (likely(skb)) { 262 if (likely(skb)) {
263 (*pkts_compl)++; 263 (*pkts_compl)++;
264 (*bytes_compl) += skb->len; 264 (*bytes_compl) += skb->len;
265 dev_kfree_skb_any(skb);
265 } 266 }
266 267
267 dev_kfree_skb_any(skb);
268 tx_buf->first_bd = 0; 268 tx_buf->first_bd = 0;
269 tx_buf->skb = NULL; 269 tx_buf->skb = NULL;
270 270
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 76b9052a961c..5907c821d131 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1718,6 +1718,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
1718 offset += sizeof(u32); 1718 offset += sizeof(u32);
1719 data_buf += sizeof(u32); 1719 data_buf += sizeof(u32);
1720 written_so_far += sizeof(u32); 1720 written_so_far += sizeof(u32);
1721
1722 /* At end of each 4Kb page, release nvram lock to allow MFW
1723 * chance to take it for its own use.
1724 */
1725 if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
1726 (written_so_far < buf_size)) {
1727 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1728 "Releasing NVM lock after offset 0x%x\n",
1729 (u32)(offset - sizeof(u32)));
1730 bnx2x_release_nvram_lock(bp);
1731 usleep_range(1000, 2000);
1732 rc = bnx2x_acquire_nvram_lock(bp);
1733 if (rc)
1734 return rc;
1735 }
1736
1721 cmd_flags = 0; 1737 cmd_flags = 0;
1722 } 1738 }
1723 1739
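The bnx2x hunk above periodically drops the NVRAM lock at 4KB page boundaries, when more data remains, so the management firmware gets a window to take it; the write then re-acquires the lock before continuing. A standalone model of the pattern (lock()/unlock() are stand-ins for the NVRAM lock helpers, and page must be non-zero):

#include <stddef.h>

static int lock(void)    { return 0; }	/* stand-in: 0 on success */
static void unlock(void) { }

static int write_all(const unsigned char *buf, size_t len, size_t page)
{
	int rc = lock();

	(void)buf;	/* the real code programs buf[off] into flash */
	for (size_t off = 0; !rc && off < len; off++) {
		/* ... program buf[off] ... */
		if ((off + 1) % page == 0 && off + 1 < len) {
			unlock();	/* give firmware a window */
			rc = lock();	/* then take the lock back */
		}
	}
	if (!rc)
		unlock();
	return rc;
}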
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 0612b19f6313..506047c38607 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
676 if (!next_cmpl->valid) 676 if (!next_cmpl->valid)
677 break; 677 break;
678 } 678 }
679 packets++;
679 680
680 /* TODO: BNA_CQ_EF_LOCAL ? */ 681 /* TODO: BNA_CQ_EF_LOCAL ? */
681 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | 682 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
@@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
692 else 693 else
693 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); 694 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
694 695
695 packets++;
696 rcb->rxq->rx_packets++; 696 rcb->rxq->rx_packets++;
697 rcb->rxq->rx_bytes += totlen; 697 rcb->rxq->rx_bytes += totlen;
698 ccb->bytes_per_intr += totlen; 698 ccb->bytes_per_intr += totlen;
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index c4d6bbe9458d..02e23e6f1424 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM
16config THUNDER_NIC_PF 16config THUNDER_NIC_PF
17 tristate "Thunder Physical function driver" 17 tristate "Thunder Physical function driver"
18 depends on 64BIT 18 depends on 64BIT
19 default ARCH_THUNDER
20 select THUNDER_NIC_BGX 19 select THUNDER_NIC_BGX
21 ---help--- 20 ---help---
22 This driver supports Thunder's NIC physical function. 21 This driver supports Thunder's NIC physical function.
@@ -29,14 +28,12 @@ config THUNDER_NIC_PF
29config THUNDER_NIC_VF 28config THUNDER_NIC_VF
30 tristate "Thunder Virtual function driver" 29 tristate "Thunder Virtual function driver"
31 depends on 64BIT 30 depends on 64BIT
32 default ARCH_THUNDER
33 ---help--- 31 ---help---
34 This driver supports Thunder's NIC virtual function 32 This driver supports Thunder's NIC virtual function
35 33
36config THUNDER_NIC_BGX 34config THUNDER_NIC_BGX
37 tristate "Thunder MAC interface driver (BGX)" 35 tristate "Thunder MAC interface driver (BGX)"
38 depends on 64BIT 36 depends on 64BIT
39 default ARCH_THUNDER
40 ---help--- 37 ---help---
41 This driver supports programming and controlling of MAC 38 This driver supports programming and controlling of MAC
42 interface from NIC physical function driver. 39 interface from NIC physical function driver.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index a11485fbb33f..c3c7db41819d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2332,10 +2332,11 @@ int t4_setup_debugfs(struct adapter *adap)
2332 EXT_MEM1_SIZE_G(size)); 2332 EXT_MEM1_SIZE_G(size));
2333 } 2333 }
2334 } else { 2334 } else {
2335 if (i & EXT_MEM_ENABLE_F) 2335 if (i & EXT_MEM_ENABLE_F) {
2336 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); 2336 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
2337 add_debugfs_mem(adap, "mc", MEM_MC, 2337 add_debugfs_mem(adap, "mc", MEM_MC,
2338 EXT_MEM_SIZE_G(size)); 2338 EXT_MEM_SIZE_G(size));
2339 }
2339 } 2340 }
2340 2341
2341 de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, 2342 de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 2716e6f30d9a..00e3a6b6b822 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -620,6 +620,11 @@ enum be_if_flags {
620 BE_IF_FLAGS_VLAN_PROMISCUOUS |\ 620 BE_IF_FLAGS_VLAN_PROMISCUOUS |\
621 BE_IF_FLAGS_MCAST_PROMISCUOUS) 621 BE_IF_FLAGS_MCAST_PROMISCUOUS)
622 622
623#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
624 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
625
626#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
627
623/* An RX interface is an object with one or more MAC addresses and 628/* An RX interface is an object with one or more MAC addresses and
624 * filtering capabilities. */ 629 * filtering capabilities. */
625struct be_cmd_req_if_create { 630struct be_cmd_req_if_create {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6f642426308c..6ca693b03f33 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
273 if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) 273 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
274 return 0; 274 return 0;
275 275
276 /* if device is not running, copy MAC to netdev->dev_addr */
277 if (!netif_running(netdev))
278 goto done;
279
276 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT 280 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
277 * privilege or if PF did not provision the new MAC address. 281 * privilege or if PF did not provision the new MAC address.
278 * On BE3, this cmd will always fail if the VF doesn't have the 282 * On BE3, this cmd will always fail if the VF doesn't have the
@@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
307 status = -EPERM; 311 status = -EPERM;
308 goto err; 312 goto err;
309 } 313 }
310 314done:
311 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 315 ether_addr_copy(netdev->dev_addr, addr->sa_data);
312 dev_info(dev, "MAC address changed to %pM\n", mac); 316 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
313 return 0; 317 return 0;
314err: 318err:
315 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data); 319 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
@@ -2447,10 +2451,24 @@ static void be_eq_clean(struct be_eq_obj *eqo)
2447 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0); 2451 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2448} 2452}
2449 2453
2450static void be_rx_cq_clean(struct be_rx_obj *rxo) 2454/* Free posted rx buffers that were not used */
2455static void be_rxq_clean(struct be_rx_obj *rxo)
2451{ 2456{
2452 struct be_rx_page_info *page_info;
2453 struct be_queue_info *rxq = &rxo->q; 2457 struct be_queue_info *rxq = &rxo->q;
2458 struct be_rx_page_info *page_info;
2459
2460 while (atomic_read(&rxq->used) > 0) {
2461 page_info = get_rx_page_info(rxo);
2462 put_page(page_info->page);
2463 memset(page_info, 0, sizeof(*page_info));
2464 }
2465 BUG_ON(atomic_read(&rxq->used));
2466 rxq->tail = 0;
2467 rxq->head = 0;
2468}
2469
2470static void be_rx_cq_clean(struct be_rx_obj *rxo)
2471{
2454 struct be_queue_info *rx_cq = &rxo->cq; 2472 struct be_queue_info *rx_cq = &rxo->cq;
2455 struct be_rx_compl_info *rxcp; 2473 struct be_rx_compl_info *rxcp;
2456 struct be_adapter *adapter = rxo->adapter; 2474 struct be_adapter *adapter = rxo->adapter;
@@ -2487,16 +2505,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
2487 2505
2488 /* After cleanup, leave the CQ in unarmed state */ 2506 /* After cleanup, leave the CQ in unarmed state */
2489 be_cq_notify(adapter, rx_cq->id, false, 0); 2507 be_cq_notify(adapter, rx_cq->id, false, 0);
2490
2491 /* Then free posted rx buffers that were not used */
2492 while (atomic_read(&rxq->used) > 0) {
2493 page_info = get_rx_page_info(rxo);
2494 put_page(page_info->page);
2495 memset(page_info, 0, sizeof(*page_info));
2496 }
2497 BUG_ON(atomic_read(&rxq->used));
2498 rxq->tail = 0;
2499 rxq->head = 0;
2500} 2508}
2501 2509
2502static void be_tx_compl_clean(struct be_adapter *adapter) 2510static void be_tx_compl_clean(struct be_adapter *adapter)
@@ -2576,8 +2584,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
2576 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); 2584 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2577 napi_hash_del(&eqo->napi); 2585 napi_hash_del(&eqo->napi);
2578 netif_napi_del(&eqo->napi); 2586 netif_napi_del(&eqo->napi);
2587 free_cpumask_var(eqo->affinity_mask);
2579 } 2588 }
2580 free_cpumask_var(eqo->affinity_mask);
2581 be_queue_free(adapter, &eqo->q); 2589 be_queue_free(adapter, &eqo->q);
2582 } 2590 }
2583} 2591}
@@ -2594,13 +2602,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
2594 2602
2595 for_all_evt_queues(adapter, eqo, i) { 2603 for_all_evt_queues(adapter, eqo, i) {
2596 int numa_node = dev_to_node(&adapter->pdev->dev); 2604 int numa_node = dev_to_node(&adapter->pdev->dev);
2597 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) 2605
2598 return -ENOMEM;
2599 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2600 eqo->affinity_mask);
2601 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2602 BE_NAPI_WEIGHT);
2603 napi_hash_add(&eqo->napi);
2604 aic = &adapter->aic_obj[i]; 2606 aic = &adapter->aic_obj[i];
2605 eqo->adapter = adapter; 2607 eqo->adapter = adapter;
2606 eqo->idx = i; 2608 eqo->idx = i;
@@ -2616,6 +2618,14 @@ static int be_evt_queues_create(struct be_adapter *adapter)
2616 rc = be_cmd_eq_create(adapter, eqo); 2618 rc = be_cmd_eq_create(adapter, eqo);
2617 if (rc) 2619 if (rc)
2618 return rc; 2620 return rc;
2621
2622 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2623 return -ENOMEM;
2624 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2625 eqo->affinity_mask);
2626 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2627 BE_NAPI_WEIGHT);
2628 napi_hash_add(&eqo->napi);
2619 } 2629 }
2620 return 0; 2630 return 0;
2621} 2631}
@@ -3354,13 +3364,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
3354 for_all_rx_queues(adapter, rxo, i) { 3364 for_all_rx_queues(adapter, rxo, i) {
3355 q = &rxo->q; 3365 q = &rxo->q;
3356 if (q->created) { 3366 if (q->created) {
3367 /* If RXQs are destroyed while in an "out of buffer"
3368 * state, there is a possibility of an HW stall on
3369 * Lancer. So, post 64 buffers to each queue to relieve
3370 * the "out of buffer" condition.
3371 * Make sure there's space in the RXQ before posting.
3372 */
3373 if (lancer_chip(adapter)) {
3374 be_rx_cq_clean(rxo);
3375 if (atomic_read(&q->used) == 0)
3376 be_post_rx_frags(rxo, GFP_KERNEL,
3377 MAX_RX_POST);
3378 }
3379
3357 be_cmd_rxq_destroy(adapter, q); 3380 be_cmd_rxq_destroy(adapter, q);
3358 be_rx_cq_clean(rxo); 3381 be_rx_cq_clean(rxo);
3382 be_rxq_clean(rxo);
3359 } 3383 }
3360 be_queue_free(adapter, q); 3384 be_queue_free(adapter, q);
3361 } 3385 }
3362} 3386}
3363 3387
3388static void be_disable_if_filters(struct be_adapter *adapter)
3389{
3390 be_cmd_pmac_del(adapter, adapter->if_handle,
3391 adapter->pmac_id[0], 0);
3392
3393 be_clear_uc_list(adapter);
3394
3395 /* The IFACE flags are enabled in the open path and cleared
3396 * in the close path. When a VF gets detached from the host and
3397 * assigned to a VM the following happens:
3398 * - VF's IFACE flags get cleared in the detach path
3399 * - IFACE create is issued by the VF in the attach path
3400 * Due to a bug in the BE3/Skyhawk-R FW
3401 * (Lancer FW doesn't have the bug), the IFACE capability flags
3402 * specified along with the IFACE create cmd issued by a VF are not
3403 * honoured by FW. As a consequence, if a *new* driver
3404 * (that enables/disables IFACE flags in open/close)
3405	 * is loaded in the host and an *old* driver is used by a VM/VF,
3406 * the IFACE gets created *without* the needed flags.
3407 * To avoid this, disable RX-filter flags only for Lancer.
3408 */
3409 if (lancer_chip(adapter)) {
3410 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3411 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3412 }
3413}
3414
3364static int be_close(struct net_device *netdev) 3415static int be_close(struct net_device *netdev)
3365{ 3416{
3366 struct be_adapter *adapter = netdev_priv(netdev); 3417 struct be_adapter *adapter = netdev_priv(netdev);
@@ -3373,6 +3424,8 @@ static int be_close(struct net_device *netdev)
3373 if (!(adapter->flags & BE_FLAGS_SETUP_DONE)) 3424 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3374 return 0; 3425 return 0;
3375 3426
3427 be_disable_if_filters(adapter);
3428
3376 be_roce_dev_close(adapter); 3429 be_roce_dev_close(adapter);
3377 3430
3378 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { 3431 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3392,7 +3445,6 @@ static int be_close(struct net_device *netdev)
3392 be_tx_compl_clean(adapter); 3445 be_tx_compl_clean(adapter);
3393 3446
3394 be_rx_qs_destroy(adapter); 3447 be_rx_qs_destroy(adapter);
3395 be_clear_uc_list(adapter);
3396 3448
3397 for_all_evt_queues(adapter, eqo, i) { 3449 for_all_evt_queues(adapter, eqo, i) {
3398 if (msix_enabled(adapter)) 3450 if (msix_enabled(adapter))
@@ -3477,6 +3529,31 @@ static int be_rx_qs_create(struct be_adapter *adapter)
3477 return 0; 3529 return 0;
3478} 3530}
3479 3531
3532static int be_enable_if_filters(struct be_adapter *adapter)
3533{
3534 int status;
3535
3536 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3537 if (status)
3538 return status;
3539
3540 /* For BE3 VFs, the PF programs the initial MAC address */
3541 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3542 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3543 adapter->if_handle,
3544 &adapter->pmac_id[0], 0);
3545 if (status)
3546 return status;
3547 }
3548
3549 if (adapter->vlans_added)
3550 be_vid_config(adapter);
3551
3552 be_set_rx_mode(adapter->netdev);
3553
3554 return 0;
3555}
3556
3480static int be_open(struct net_device *netdev) 3557static int be_open(struct net_device *netdev)
3481{ 3558{
3482 struct be_adapter *adapter = netdev_priv(netdev); 3559 struct be_adapter *adapter = netdev_priv(netdev);
@@ -3490,6 +3567,10 @@ static int be_open(struct net_device *netdev)
3490 if (status) 3567 if (status)
3491 goto err; 3568 goto err;
3492 3569
3570 status = be_enable_if_filters(adapter);
3571 if (status)
3572 goto err;
3573
3493 status = be_irq_register(adapter); 3574 status = be_irq_register(adapter);
3494 if (status) 3575 if (status)
3495 goto err; 3576 goto err;
@@ -3686,16 +3767,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
3686 } 3767 }
3687} 3768}
3688 3769
3689static void be_mac_clear(struct be_adapter *adapter)
3690{
3691 if (adapter->pmac_id) {
3692 be_cmd_pmac_del(adapter, adapter->if_handle,
3693 adapter->pmac_id[0], 0);
3694 kfree(adapter->pmac_id);
3695 adapter->pmac_id = NULL;
3696 }
3697}
3698
3699#ifdef CONFIG_BE2NET_VXLAN 3770#ifdef CONFIG_BE2NET_VXLAN
3700static void be_disable_vxlan_offloads(struct be_adapter *adapter) 3771static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3701{ 3772{
@@ -3770,8 +3841,8 @@ static int be_clear(struct be_adapter *adapter)
3770#ifdef CONFIG_BE2NET_VXLAN 3841#ifdef CONFIG_BE2NET_VXLAN
3771 be_disable_vxlan_offloads(adapter); 3842 be_disable_vxlan_offloads(adapter);
3772#endif 3843#endif
3773 /* delete the primary mac along with the uc-mac list */ 3844 kfree(adapter->pmac_id);
3774 be_mac_clear(adapter); 3845 adapter->pmac_id = NULL;
3775 3846
3776 be_cmd_if_destroy(adapter, adapter->if_handle, 0); 3847 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3777 3848
@@ -3782,25 +3853,11 @@ static int be_clear(struct be_adapter *adapter)
3782 return 0; 3853 return 0;
3783} 3854}
3784 3855
3785static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3786 u32 cap_flags, u32 vf)
3787{
3788 u32 en_flags;
3789
3790 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3791 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3792 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
3793
3794 en_flags &= cap_flags;
3795
3796 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
3797}
3798
3799static int be_vfs_if_create(struct be_adapter *adapter) 3856static int be_vfs_if_create(struct be_adapter *adapter)
3800{ 3857{
3801 struct be_resources res = {0}; 3858 struct be_resources res = {0};
3859 u32 cap_flags, en_flags, vf;
3802 struct be_vf_cfg *vf_cfg; 3860 struct be_vf_cfg *vf_cfg;
3803 u32 cap_flags, vf;
3804 int status; 3861 int status;
3805 3862
3806 /* If a FW profile exists, then cap_flags are updated */ 3863 /* If a FW profile exists, then cap_flags are updated */
@@ -3821,8 +3878,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3821 } 3878 }
3822 } 3879 }
3823 3880
3824 status = be_if_create(adapter, &vf_cfg->if_handle, 3881 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3825 cap_flags, vf + 1); 3882 BE_IF_FLAGS_BROADCAST |
3883 BE_IF_FLAGS_MULTICAST |
3884 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3885 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3886 &vf_cfg->if_handle, vf + 1);
3826 if (status) 3887 if (status)
3827 return status; 3888 return status;
3828 } 3889 }
@@ -4194,15 +4255,8 @@ static int be_mac_setup(struct be_adapter *adapter)
4194 4255
4195 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); 4256 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4196 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); 4257 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4197 } else {
4198 /* Maybe the HW was reset; dev_addr must be re-programmed */
4199 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
4200 } 4258 }
4201 4259
4202 /* For BE3-R VFs, the PF programs the initial MAC address */
4203 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
4204 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
4205 &adapter->pmac_id[0], 0);
4206 return 0; 4260 return 0;
4207} 4261}
4208 4262
@@ -4342,6 +4396,7 @@ static int be_func_init(struct be_adapter *adapter)
4342static int be_setup(struct be_adapter *adapter) 4396static int be_setup(struct be_adapter *adapter)
4343{ 4397{
4344 struct device *dev = &adapter->pdev->dev; 4398 struct device *dev = &adapter->pdev->dev;
4399 u32 en_flags;
4345 int status; 4400 int status;
4346 4401
4347 status = be_func_init(adapter); 4402 status = be_func_init(adapter);
@@ -4364,8 +4419,11 @@ static int be_setup(struct be_adapter *adapter)
4364 if (status) 4419 if (status)
4365 goto err; 4420 goto err;
4366 4421
4367 status = be_if_create(adapter, &adapter->if_handle, 4422 /* will enable all the needed filter flags in be_open() */
4368 be_if_cap_flags(adapter), 0); 4423 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4424 en_flags = en_flags & be_if_cap_flags(adapter);
4425 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4426 &adapter->if_handle, 0);
4369 if (status) 4427 if (status)
4370 goto err; 4428 goto err;
4371 4429
@@ -4391,11 +4449,6 @@ static int be_setup(struct be_adapter *adapter)
4391 dev_err(dev, "Please upgrade firmware to version >= 4.0\n"); 4449 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4392 } 4450 }
4393 4451
4394 if (adapter->vlans_added)
4395 be_vid_config(adapter);
4396
4397 be_set_rx_mode(adapter->netdev);
4398
4399 status = be_cmd_set_flow_control(adapter, adapter->tx_fc, 4452 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4400 adapter->rx_fc); 4453 adapter->rx_fc);
4401 if (status) 4454 if (status)
@@ -5121,7 +5174,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5121 struct device *dev = &adapter->pdev->dev; 5174 struct device *dev = &adapter->pdev->dev;
5122 int status; 5175 int status;
5123 5176
5124 if (lancer_chip(adapter) || BEx_chip(adapter)) 5177 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5125 return; 5178 return;
5126 5179
5127 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { 5180 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -5168,7 +5221,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5168{ 5221{
5169 struct be_adapter *adapter = netdev_priv(netdev); 5222 struct be_adapter *adapter = netdev_priv(netdev);
5170 5223
5171 if (lancer_chip(adapter) || BEx_chip(adapter)) 5224 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5172 return; 5225 return;
5173 5226
5174 if (adapter->vxlan_port != port) 5227 if (adapter->vxlan_port != port)
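
The be2net hunks above replace the be_if_create() wrapper with direct be_cmd_if_create() calls and compute the enabled-filter set as the intersection of the requested filters with the capability flags the firmware reported. A minimal standalone sketch of that masking, with illustrative flag values rather than be2net's real encoding:

/*
 * Standalone sketch (not the driver): enabled flags must be a
 * subset of the capability flags firmware reported. The flag
 * values below are illustrative.
 */
#include <stdio.h>

#define IF_FLAGS_UNTAGGED        (1u << 0)
#define IF_FLAGS_BROADCAST       (1u << 1)
#define IF_FLAGS_MULTICAST       (1u << 2)
#define IF_FLAGS_PASS_L3L4_ERRS  (1u << 3)
#define IF_FLAGS_RSS             (1u << 4)

int main(void)
{
	unsigned int cap_flags = IF_FLAGS_UNTAGGED | IF_FLAGS_MULTICAST |
				 IF_FLAGS_RSS;		/* what FW offers */
	unsigned int want = IF_FLAGS_UNTAGGED | IF_FLAGS_BROADCAST |
			    IF_FLAGS_MULTICAST | IF_FLAGS_PASS_L3L4_ERRS;
	unsigned int en_flags = cap_flags & want;	/* never exceed caps */

	printf("cap=%#x want=%#x enabled=%#x\n", cap_flags, want, en_flags);
	return 0;
}

Keeping en_flags a strict subset of cap_flags is what lets be_open() turn on the remaining filter flags later without exceeding what the firmware offered.
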
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 32e3807c650e..271bb5862346 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3433,6 +3433,7 @@ fec_probe(struct platform_device *pdev)
3433 3433
3434 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); 3434 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
3435 pm_runtime_use_autosuspend(&pdev->dev); 3435 pm_runtime_use_autosuspend(&pdev->dev);
3436 pm_runtime_get_noresume(&pdev->dev);
3436 pm_runtime_set_active(&pdev->dev); 3437 pm_runtime_set_active(&pdev->dev);
3437 pm_runtime_enable(&pdev->dev); 3438 pm_runtime_enable(&pdev->dev);
3438 3439
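
Adding pm_runtime_get_noresume() before enabling runtime PM keeps the freshly probed device from autosuspending while probe is still touching it. A hedged sketch of the common ordering, assuming a kernel context; example_probe_pm_setup() is a hypothetical helper and the 1000 ms delay is illustrative:

/*
 * Hedged sketch of probe-time runtime-PM setup (kernel context).
 */
#include <linux/pm_runtime.h>

static void example_probe_pm_setup(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_get_noresume(dev);	/* hold a reference, no resume call */
	pm_runtime_set_active(dev);	/* the device is already powered */
	pm_runtime_enable(dev);

	/* ... the rest of probe runs with the device guaranteed on ... */

	pm_runtime_put_autosuspend(dev);	/* balance the noresume get */
}
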
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 56316db6c5a6..cf8e54652df9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
586 frag = skb_shinfo(skb)->frags; 586 frag = skb_shinfo(skb)->frags;
587 while (nr_frags) { 587 while (nr_frags) {
588 CBDC_SC(bdp, 588 CBDC_SC(bdp,
589 BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC); 589 BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
590 BD_ENET_TX_TC);
590 CBDS_SC(bdp, BD_ENET_TX_READY); 591 CBDS_SC(bdp, BD_ENET_TX_READY);
591 592
592 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) 593 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index b34214e2df5f..016743e355de 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
110} 110}
111 111
112#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) 112#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
113#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB) 113#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF)
114#define FEC_RX_EVENT (FEC_ENET_RXF) 114#define FEC_RX_EVENT (FEC_ENET_RXF)
115#define FEC_TX_EVENT (FEC_ENET_TXF) 115#define FEC_TX_EVENT (FEC_ENET_TXF)
116#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ 116#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2b7610f341b0..10b3bbbbac8e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2102,6 +2102,11 @@ int startup_gfar(struct net_device *ndev)
2102 /* Start Rx/Tx DMA and enable the interrupts */ 2102 /* Start Rx/Tx DMA and enable the interrupts */
2103 gfar_start(priv); 2103 gfar_start(priv);
2104 2104
2105 /* force link state update after mac reset */
2106 priv->oldlink = 0;
2107 priv->oldspeed = 0;
2108 priv->oldduplex = -1;
2109
2105 phy_start(priv->phydev); 2110 phy_start(priv->phydev);
2106 2111
2107 enable_napi(priv); 2112 enable_napi(priv);
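
Zeroing priv->oldlink/oldspeed/oldduplex before phy_start() matters because the adjust_link callback only fires when the freshly read PHY state differs from these cached values; after a MAC reset the cache can still describe the pre-reset link, so the MAC would never be reprogrammed. A standalone toy model (not phylib) of that effect:

/* Toy model of why the cached link state must be invalidated. */
#include <stdio.h>

struct toy_priv { int oldlink, oldspeed, oldduplex; };

static void poll_link(struct toy_priv *p, int link, int speed, int duplex)
{
	if (p->oldlink != link || p->oldspeed != speed ||
	    p->oldduplex != duplex) {
		printf("adjust_link: link=%d speed=%d duplex=%d\n",
		       link, speed, duplex);
		p->oldlink = link;
		p->oldspeed = speed;
		p->oldduplex = duplex;
	}
}

int main(void)
{
	struct toy_priv p = { .oldlink = 1, .oldspeed = 1000, .oldduplex = 1 };

	/* MAC was reset but the cache still says "1000/full, up":
	 * without invalidation this poll is silently skipped. */
	poll_link(&p, 1, 1000, 1);

	/* Invalidate, as startup_gfar() now does; the callback runs. */
	p.oldlink = 0;
	p.oldspeed = 0;
	p.oldduplex = -1;
	poll_link(&p, 1, 1000, 1);
	return 0;
}
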
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 3c0a8f825b63..5b90fcf96265 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -900,27 +900,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
900 return 0; 900 return 0;
901} 901}
902 902
903static int gfar_comp_asc(const void *a, const void *b)
904{
905 return memcmp(a, b, 4);
906}
907
908static int gfar_comp_desc(const void *a, const void *b)
909{
910 return -memcmp(a, b, 4);
911}
912
913static void gfar_swap(void *a, void *b, int size)
914{
915 u32 *_a = a;
916 u32 *_b = b;
917
918 swap(_a[0], _b[0]);
919 swap(_a[1], _b[1]);
920 swap(_a[2], _b[2]);
921 swap(_a[3], _b[3]);
922}
923
924/* Write a mask to filer cache */ 903/* Write a mask to filer cache */
925static void gfar_set_mask(u32 mask, struct filer_table *tab) 904static void gfar_set_mask(u32 mask, struct filer_table *tab)
926{ 905{
@@ -1270,310 +1249,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1270 return 0; 1249 return 0;
1271} 1250}
1272 1251
1273/* Copy size filer entries */
1274static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
1275 struct gfar_filer_entry src[0], s32 size)
1276{
1277 while (size > 0) {
1278 size--;
1279 dst[size].ctrl = src[size].ctrl;
1280 dst[size].prop = src[size].prop;
1281 }
1282}
1283
1284/* Delete the contents of the filer-table between start and end
1285 * and collapse them
1286 */
1287static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
1288{
1289 int length;
1290
1291 if (end > MAX_FILER_CACHE_IDX || end < begin)
1292 return -EINVAL;
1293
1294 end++;
1295 length = end - begin;
1296
1297 /* Copy */
1298 while (end < tab->index) {
1299 tab->fe[begin].ctrl = tab->fe[end].ctrl;
1300 tab->fe[begin++].prop = tab->fe[end++].prop;
1301
1302 }
1303 /* Fill up with don't cares */
1304 while (begin < tab->index) {
1305 tab->fe[begin].ctrl = 0x60;
1306 tab->fe[begin].prop = 0xFFFFFFFF;
1307 begin++;
1308 }
1309
1310 tab->index -= length;
1311 return 0;
1312}
1313
1314/* Make space on the wanted location */
1315static int gfar_expand_filer_entries(u32 begin, u32 length,
1316 struct filer_table *tab)
1317{
1318 if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
1319 begin > MAX_FILER_CACHE_IDX)
1320 return -EINVAL;
1321
1322 gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
1323 tab->index - length + 1);
1324
1325 tab->index += length;
1326 return 0;
1327}
1328
1329static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
1330{
1331 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1332 start++) {
1333 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1334 (RQFCR_AND | RQFCR_CLE))
1335 return start;
1336 }
1337 return -1;
1338}
1339
1340static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
1341{
1342 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1343 start++) {
1344 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1345 (RQFCR_CLE))
1346 return start;
1347 }
1348 return -1;
1349}
1350
1351/* Uses the hardware's clustering option to reduce
1352 * the number of filer table entries
1353 */
1354static void gfar_cluster_filer(struct filer_table *tab)
1355{
1356 s32 i = -1, j, iend, jend;
1357
1358 while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
1359 j = i;
1360 while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
1361 /* The cluster entry itself and the previous one
1362 * (a mask) must be identical!
1363 */
1364 if (tab->fe[i].ctrl != tab->fe[j].ctrl)
1365 break;
1366 if (tab->fe[i].prop != tab->fe[j].prop)
1367 break;
1368 if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
1369 break;
1370 if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
1371 break;
1372 iend = gfar_get_next_cluster_end(i, tab);
1373 jend = gfar_get_next_cluster_end(j, tab);
1374 if (jend == -1 || iend == -1)
1375 break;
1376
1377 /* First we make some free space, where our cluster
1378 * element should be. Then we copy it there and finally
1379 * delete it from its old location.
1380 */
1381 if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
1382 -EINVAL)
1383 break;
1384
1385 gfar_copy_filer_entries(&(tab->fe[iend + 1]),
1386 &(tab->fe[jend + 1]), jend - j);
1387
1388 if (gfar_trim_filer_entries(jend - 1,
1389 jend + (jend - j),
1390 tab) == -EINVAL)
1391 return;
1392
1393 /* Mask out cluster bit */
1394 tab->fe[iend].ctrl &= ~(RQFCR_CLE);
1395 }
1396 }
1397}
1398
1399/* Swaps the masked bits of a1<>a2 and b1<>b2 */
1400static void gfar_swap_bits(struct gfar_filer_entry *a1,
1401 struct gfar_filer_entry *a2,
1402 struct gfar_filer_entry *b1,
1403 struct gfar_filer_entry *b2, u32 mask)
1404{
1405 u32 temp[4];
1406 temp[0] = a1->ctrl & mask;
1407 temp[1] = a2->ctrl & mask;
1408 temp[2] = b1->ctrl & mask;
1409 temp[3] = b2->ctrl & mask;
1410
1411 a1->ctrl &= ~mask;
1412 a2->ctrl &= ~mask;
1413 b1->ctrl &= ~mask;
1414 b2->ctrl &= ~mask;
1415
1416 a1->ctrl |= temp[1];
1417 a2->ctrl |= temp[0];
1418 b1->ctrl |= temp[3];
1419 b2->ctrl |= temp[2];
1420}
1421
1422/* Generate a list of mask values, each with its start and
1423 * end of validity, plus a block index marking parts that belong
1424 * together (glued by ANDs), in mask_table
1425 */
1426static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
1427 struct filer_table *tab)
1428{
1429 u32 i, and_index = 0, block_index = 1;
1430
1431 for (i = 0; i < tab->index; i++) {
1432
1433 /* LSByte of control = 0 sets a mask */
1434 if (!(tab->fe[i].ctrl & 0xF)) {
1435 mask_table[and_index].mask = tab->fe[i].prop;
1436 mask_table[and_index].start = i;
1437 mask_table[and_index].block = block_index;
1438 if (and_index >= 1)
1439 mask_table[and_index - 1].end = i - 1;
1440 and_index++;
1441 }
1442 /* cluster starts and ends will be separated because they should
1443 * hold their position
1444 */
1445 if (tab->fe[i].ctrl & RQFCR_CLE)
1446 block_index++;
1447 /* A cleared AND bit indicates the end of a dependent block */
1448 if (!(tab->fe[i].ctrl & RQFCR_AND))
1449 block_index++;
1450 }
1451
1452 mask_table[and_index - 1].end = i - 1;
1453
1454 return and_index;
1455}
1456
1457/* Sorts the entries of mask_table by the values of the masks.
1458 * Important: The 0xFF80 flags of the first and last entry of a
1459 * block must hold their position (which queue, CLusterEnable, ReJEct,
1460 * AND)
1461 */
1462static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
1463 struct filer_table *temp_table, u32 and_index)
1464{
1465 /* Pointer to compare function (_asc or _desc) */
1466 int (*gfar_comp)(const void *, const void *);
1467
1468 u32 i, size = 0, start = 0, prev = 1;
1469 u32 old_first, old_last, new_first, new_last;
1470
1471 gfar_comp = &gfar_comp_desc;
1472
1473 for (i = 0; i < and_index; i++) {
1474 if (prev != mask_table[i].block) {
1475 old_first = mask_table[start].start + 1;
1476 old_last = mask_table[i - 1].end;
1477 sort(mask_table + start, size,
1478 sizeof(struct gfar_mask_entry),
1479 gfar_comp, &gfar_swap);
1480
1481 /* Toggle the sort order for every block. This makes the
1482 * optimization more effective!
1483 */
1484 if (gfar_comp == gfar_comp_desc)
1485 gfar_comp = &gfar_comp_asc;
1486 else
1487 gfar_comp = &gfar_comp_desc;
1488
1489 new_first = mask_table[start].start + 1;
1490 new_last = mask_table[i - 1].end;
1491
1492 gfar_swap_bits(&temp_table->fe[new_first],
1493 &temp_table->fe[old_first],
1494 &temp_table->fe[new_last],
1495 &temp_table->fe[old_last],
1496 RQFCR_QUEUE | RQFCR_CLE |
1497 RQFCR_RJE | RQFCR_AND);
1498
1499 start = i;
1500 size = 0;
1501 }
1502 size++;
1503 prev = mask_table[i].block;
1504 }
1505}
1506
1507/* Reduces the number of masks needed in the filer table to save entries
1508 * This is done by sorting the masks of a dependent block. A dependent block is
1509 * identified by gluing ANDs or CLE. The sorting order toggles after every
1510 * block. Of course entries in scope of a mask must change their location with
1511 * it.
1512 */
1513static int gfar_optimize_filer_masks(struct filer_table *tab)
1514{
1515 struct filer_table *temp_table;
1516 struct gfar_mask_entry *mask_table;
1517
1518 u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
1519 s32 ret = 0;
1520
1521 /* We need a copy of the filer table because
1522 * we want to change its order
1523 */
1524 temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
1525 if (temp_table == NULL)
1526 return -ENOMEM;
1527
1528 mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
1529 sizeof(struct gfar_mask_entry), GFP_KERNEL);
1530
1531 if (mask_table == NULL) {
1532 ret = -ENOMEM;
1533 goto end;
1534 }
1535
1536 and_index = gfar_generate_mask_table(mask_table, tab);
1537
1538 gfar_sort_mask_table(mask_table, temp_table, and_index);
1539
1540 /* Now we can copy the data from our duplicated filer table to
1541 * the real one in the order the mask table says
1542 */
1543 for (i = 0; i < and_index; i++) {
1544 size = mask_table[i].end - mask_table[i].start + 1;
1545 gfar_copy_filer_entries(&(tab->fe[j]),
1546 &(temp_table->fe[mask_table[i].start]), size);
1547 j += size;
1548 }
1549
1550 /* And finally we just have to check for duplicated masks and drop the
1551 * second ones
1552 */
1553 for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1554 if (tab->fe[i].ctrl == 0x80) {
1555 previous_mask = i++;
1556 break;
1557 }
1558 }
1559 for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1560 if (tab->fe[i].ctrl == 0x80) {
1561 if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
1562 /* Two identical ones found!
1563 * So drop the second one!
1564 */
1565 gfar_trim_filer_entries(i, i, tab);
1566 } else
1567 /* Not identical! */
1568 previous_mask = i;
1569 }
1570 }
1571
1572 kfree(mask_table);
1573end: kfree(temp_table);
1574 return ret;
1575}
1576
1577/* Write the bit-pattern from software's buffer to hardware registers */ 1252/* Write the bit-pattern from software's buffer to hardware registers */
1578static int gfar_write_filer_table(struct gfar_private *priv, 1253static int gfar_write_filer_table(struct gfar_private *priv,
1579 struct filer_table *tab) 1254 struct filer_table *tab)
@@ -1583,11 +1258,10 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1583 return -EBUSY; 1258 return -EBUSY;
1584 1259
1585 /* Fill regular entries */ 1260 /* Fill regular entries */
1586 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); 1261 for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
1587 i++)
1588 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); 1262 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
1589 /* Fill the rest with fall-throughs */ 1263 /* Fill the rest with fall-throughs */
1590 for (; i < MAX_FILER_IDX - 1; i++) 1264 for (; i < MAX_FILER_IDX; i++)
1591 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); 1265 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
1592 /* Last entry must be default accept 1266 /* Last entry must be default accept
1593 * because that's what people expect 1267 * because that's what people expect
@@ -1621,7 +1295,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1621{ 1295{
1622 struct ethtool_flow_spec_container *j; 1296 struct ethtool_flow_spec_container *j;
1623 struct filer_table *tab; 1297 struct filer_table *tab;
1624 s32 i = 0;
1625 s32 ret = 0; 1298 s32 ret = 0;
1626 1299
1627 /* So index is set to zero, too! */ 1300 /* So index is set to zero, too! */
@@ -1646,17 +1319,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1646 } 1319 }
1647 } 1320 }
1648 1321
1649 i = tab->index;
1650
1651 /* Optimizations to save entries */
1652 gfar_cluster_filer(tab);
1653 gfar_optimize_filer_masks(tab);
1654
1655 pr_debug("\tSummary:\n"
1656 "\tData on hardware: %d\n"
1657 "\tCompression rate: %d%%\n",
1658 tab->index, 100 - (100 * tab->index) / i);
1659
1660 /* Write everything to hardware */ 1322 /* Write everything to hardware */
1661 ret = gfar_write_filer_table(priv, tab); 1323 ret = gfar_write_filer_table(priv, tab);
1662 if (ret == -EBUSY) { 1324 if (ret == -EBUSY) {
@@ -1722,13 +1384,14 @@ static int gfar_add_cls(struct gfar_private *priv,
1722 } 1384 }
1723 1385
1724process: 1386process:
1387 priv->rx_list.count++;
1725 ret = gfar_process_filer_changes(priv); 1388 ret = gfar_process_filer_changes(priv);
1726 if (ret) 1389 if (ret)
1727 goto clean_list; 1390 goto clean_list;
1728 priv->rx_list.count++;
1729 return ret; 1391 return ret;
1730 1392
1731clean_list: 1393clean_list:
1394 priv->rx_list.count--;
1732 list_del(&temp->list); 1395 list_del(&temp->list);
1733clean_mem: 1396clean_mem:
1734 kfree(temp); 1397 kfree(temp);
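
With the clustering and mask-sorting optimizer removed, gfar_write_filer_table() simply copies the composed entries to hardware and pads the remaining slots with don't-care fall-throughs, now covering the full table instead of stopping one entry short. A standalone sketch of that write loop; the 8-slot table and the printf stand-in for gfar_write_filer() are illustrative:

/* Standalone sketch of the simplified filer-table write. */
#include <stdio.h>
#include <stdint.h>

#define MAX_FILER_IDX 8

struct fe { uint32_t ctrl; uint32_t prop; };

static void write_filer(int i, uint32_t ctrl, uint32_t prop)
{
	printf("slot %d: ctrl=%#010x prop=%#010x\n", i,
	       (unsigned int)ctrl, (unsigned int)prop);
}

int main(void)
{
	struct fe tab[MAX_FILER_IDX] = {
		{ 0x80, 0xdeadbeef }, { 0x61, 0x00000800 },
	};
	int i = 0;

	/* Valid entries: ctrl or prop nonzero. */
	for (; i < MAX_FILER_IDX && (tab[i].ctrl | tab[i].prop); i++)
		write_filer(i, tab[i].ctrl, tab[i].prop);
	/* Pad the remainder, including the last slot, with fall-throughs. */
	for (; i < MAX_FILER_IDX; i++)
		write_filer(i, 0x60, 0xFFFFFFFF);
	return 0;
}
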
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 982fdcdc795b..b5b2925103ec 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
216 216
217static inline bool fm10k_page_is_reserved(struct page *page) 217static inline bool fm10k_page_is_reserved(struct page *page)
218{ 218{
219 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; 219 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
220} 220}
221 221
222static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, 222static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 2f70a9b152bd..830466c49987 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6566,7 +6566,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6566 6566
6567static inline bool igb_page_is_reserved(struct page *page) 6567static inline bool igb_page_is_reserved(struct page *page)
6568{ 6568{
6569 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; 6569 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
6570} 6570}
6571 6571
6572static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, 6572static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9aa6104e34ea..ae21e0b06c3a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1832,7 +1832,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1832 1832
1833static inline bool ixgbe_page_is_reserved(struct page *page) 1833static inline bool ixgbe_page_is_reserved(struct page *page)
1834{ 1834{
1835 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; 1835 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1836} 1836}
1837 1837
1838/** 1838/**
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index e71cdde9cb01..1d7b00b038a2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
765 765
766static inline bool ixgbevf_page_is_reserved(struct page *page) 766static inline bool ixgbevf_page_is_reserved(struct page *page)
767{ 767{
768 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; 768 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
769} 769}
770 770
771/** 771/**
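
The four Intel hunks above are the same API migration: page->pfmemalloc was folded into an overloaded field of struct page, so drivers must go through the page_is_pfmemalloc() accessor (added in 4.2) instead of reading the field directly. A kernel-context sketch of the resulting check, named example_page_is_reserved() here to avoid clashing with the drivers' own helpers:

/*
 * Kernel-context sketch of the check all four drivers converge on:
 * remote-node pages and emergency-reserve (pfmemalloc) pages must
 * not be recycled into the RX ring.
 */
#include <linux/mm.h>
#include <linux/topology.h>

static inline bool example_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) ||
	       page_is_pfmemalloc(page);
}
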
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 3e8b1bfb1f2e..d9884fd15b45 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -27,6 +27,8 @@
27#include <linux/of_address.h> 27#include <linux/of_address.h>
28#include <linux/phy.h> 28#include <linux/phy.h>
29#include <linux/clk.h> 29#include <linux/clk.h>
30#include <linux/hrtimer.h>
31#include <linux/ktime.h>
30#include <uapi/linux/ppp_defs.h> 32#include <uapi/linux/ppp_defs.h>
31#include <net/ip.h> 33#include <net/ip.h>
32#include <net/ipv6.h> 34#include <net/ipv6.h>
@@ -299,6 +301,7 @@
299 301
300/* Coalescing */ 302/* Coalescing */
301#define MVPP2_TXDONE_COAL_PKTS_THRESH 15 303#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
304#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
302#define MVPP2_RX_COAL_PKTS 32 305#define MVPP2_RX_COAL_PKTS 32
303#define MVPP2_RX_COAL_USEC 100 306#define MVPP2_RX_COAL_USEC 100
304 307
@@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats {
660 u64 tx_bytes; 663 u64 tx_bytes;
661}; 664};
662 665
666/* Per-CPU port control */
667struct mvpp2_port_pcpu {
668 struct hrtimer tx_done_timer;
669 bool timer_scheduled;
670 /* Tasklet for egress finalization */
671 struct tasklet_struct tx_done_tasklet;
672};
673
663struct mvpp2_port { 674struct mvpp2_port {
664 u8 id; 675 u8 id;
665 676
@@ -679,6 +690,9 @@ struct mvpp2_port {
679 u32 pending_cause_rx; 690 u32 pending_cause_rx;
680 struct napi_struct napi; 691 struct napi_struct napi;
681 692
693 /* Per-CPU port control */
694 struct mvpp2_port_pcpu __percpu *pcpu;
695
682 /* Flags */ 696 /* Flags */
683 unsigned long flags; 697 unsigned long flags;
684 698
@@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu {
776 /* Array of transmitted skb */ 790 /* Array of transmitted skb */
777 struct sk_buff **tx_skb; 791 struct sk_buff **tx_skb;
778 792
793 /* Array of transmitted buffers' physical addresses */
794 dma_addr_t *tx_buffs;
795
779 /* Index of last TX DMA descriptor that was inserted */ 796 /* Index of last TX DMA descriptor that was inserted */
780 int txq_put_index; 797 int txq_put_index;
781 798
@@ -913,8 +930,6 @@ struct mvpp2_bm_pool {
913 /* Occupied buffers indicator */ 930 /* Occupied buffers indicator */
914 atomic_t in_use; 931 atomic_t in_use;
915 int in_use_thresh; 932 int in_use_thresh;
916
917 spinlock_t lock;
918}; 933};
919 934
920struct mvpp2_buff_hdr { 935struct mvpp2_buff_hdr {
@@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
963} 978}
964 979
965static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, 980static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
966 struct sk_buff *skb) 981 struct sk_buff *skb,
982 struct mvpp2_tx_desc *tx_desc)
967{ 983{
968 txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb; 984 txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
985 if (skb)
986 txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
987 tx_desc->buf_phys_addr;
969 txq_pcpu->txq_put_index++; 988 txq_pcpu->txq_put_index++;
970 if (txq_pcpu->txq_put_index == txq_pcpu->size) 989 if (txq_pcpu->txq_put_index == txq_pcpu->size)
971 txq_pcpu->txq_put_index = 0; 990 txq_pcpu->txq_put_index = 0;
@@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
3376 bm_pool->pkt_size = 0; 3395 bm_pool->pkt_size = 0;
3377 bm_pool->buf_num = 0; 3396 bm_pool->buf_num = 0;
3378 atomic_set(&bm_pool->in_use, 0); 3397 atomic_set(&bm_pool->in_use, 0);
3379 spin_lock_init(&bm_pool->lock);
3380 3398
3381 return 0; 3399 return 0;
3382} 3400}
@@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool *
3647mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, 3665mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3648 int pkt_size) 3666 int pkt_size)
3649{ 3667{
3650 unsigned long flags = 0;
3651 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; 3668 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
3652 int num; 3669 int num;
3653 3670
@@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3656 return NULL; 3673 return NULL;
3657 } 3674 }
3658 3675
3659 spin_lock_irqsave(&new_pool->lock, flags);
3660
3661 if (new_pool->type == MVPP2_BM_FREE) 3676 if (new_pool->type == MVPP2_BM_FREE)
3662 new_pool->type = type; 3677 new_pool->type = type;
3663 3678
@@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3686 if (num != pkts_num) { 3701 if (num != pkts_num) {
3687 WARN(1, "pool %d: %d of %d allocated\n", 3702 WARN(1, "pool %d: %d of %d allocated\n",
3688 new_pool->id, num, pkts_num); 3703 new_pool->id, num, pkts_num);
3689 /* We need to undo the bufs_add() allocations */
3690 spin_unlock_irqrestore(&new_pool->lock, flags);
3691 return NULL; 3704 return NULL;
3692 } 3705 }
3693 } 3706 }
@@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3695 mvpp2_bm_pool_bufsize_set(port->priv, new_pool, 3708 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
3696 MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); 3709 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
3697 3710
3698 spin_unlock_irqrestore(&new_pool->lock, flags);
3699
3700 return new_pool; 3711 return new_pool;
3701} 3712}
3702 3713
3703/* Initialize pools for swf */ 3714/* Initialize pools for swf */
3704static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) 3715static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3705{ 3716{
3706 unsigned long flags = 0;
3707 int rxq; 3717 int rxq;
3708 3718
3709 if (!port->pool_long) { 3719 if (!port->pool_long) {
@@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3714 if (!port->pool_long) 3724 if (!port->pool_long)
3715 return -ENOMEM; 3725 return -ENOMEM;
3716 3726
3717 spin_lock_irqsave(&port->pool_long->lock, flags);
3718 port->pool_long->port_map |= (1 << port->id); 3727 port->pool_long->port_map |= (1 << port->id);
3719 spin_unlock_irqrestore(&port->pool_long->lock, flags);
3720 3728
3721 for (rxq = 0; rxq < rxq_number; rxq++) 3729 for (rxq = 0; rxq < rxq_number; rxq++)
3722 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); 3730 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3730 if (!port->pool_short) 3738 if (!port->pool_short)
3731 return -ENOMEM; 3739 return -ENOMEM;
3732 3740
3733 spin_lock_irqsave(&port->pool_short->lock, flags);
3734 port->pool_short->port_map |= (1 << port->id); 3741 port->pool_short->port_map |= (1 << port->id);
3735 spin_unlock_irqrestore(&port->pool_short->lock, flags);
3736 3742
3737 for (rxq = 0; rxq < rxq_number; rxq++) 3743 for (rxq = 0; rxq < rxq_number; rxq++)
3738 mvpp2_rxq_short_pool_set(port, rxq, 3744 mvpp2_rxq_short_pool_set(port, rxq,
@@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg)
3806 3812
3807 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 3813 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
3808 (MVPP2_CAUSE_MISC_SUM_MASK | 3814 (MVPP2_CAUSE_MISC_SUM_MASK |
3809 MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
3810 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); 3815 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
3811} 3816}
3812 3817
@@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
4382 rxq->time_coal = usec; 4387 rxq->time_coal = usec;
4383} 4388}
4384 4389
4385/* Set threshold for TX_DONE pkts coalescing */
4386static void mvpp2_tx_done_pkts_coal_set(void *arg)
4387{
4388 struct mvpp2_port *port = arg;
4389 int queue;
4390 u32 val;
4391
4392 for (queue = 0; queue < txq_number; queue++) {
4393 struct mvpp2_tx_queue *txq = port->txqs[queue];
4394
4395 val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
4396 MVPP2_TRANSMITTED_THRESH_MASK;
4397 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4398 mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
4399 }
4400}
4401
4402/* Free Tx queue skbuffs */ 4390/* Free Tx queue skbuffs */
4403static void mvpp2_txq_bufs_free(struct mvpp2_port *port, 4391static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4404 struct mvpp2_tx_queue *txq, 4392 struct mvpp2_tx_queue *txq,
@@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4407 int i; 4395 int i;
4408 4396
4409 for (i = 0; i < num; i++) { 4397 for (i = 0; i < num; i++) {
4410 struct mvpp2_tx_desc *tx_desc = txq->descs + 4398 dma_addr_t buf_phys_addr =
4411 txq_pcpu->txq_get_index; 4399 txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
4412 struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index]; 4400 struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
4413 4401
4414 mvpp2_txq_inc_get(txq_pcpu); 4402 mvpp2_txq_inc_get(txq_pcpu);
@@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4416 if (!skb) 4404 if (!skb)
4417 continue; 4405 continue;
4418 4406
4419 dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr, 4407 dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
4420 tx_desc->data_size, DMA_TO_DEVICE); 4408 skb_headlen(skb), DMA_TO_DEVICE);
4421 dev_kfree_skb_any(skb); 4409 dev_kfree_skb_any(skb);
4422 } 4410 }
4423} 4411}
@@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4433static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, 4421static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4434 u32 cause) 4422 u32 cause)
4435{ 4423{
4436 int queue = fls(cause >> 16) - 1; 4424 int queue = fls(cause) - 1;
4437 4425
4438 return port->txqs[queue]; 4426 return port->txqs[queue];
4439} 4427}
@@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4460 netif_tx_wake_queue(nq); 4448 netif_tx_wake_queue(nq);
4461} 4449}
4462 4450
4451static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
4452{
4453 struct mvpp2_tx_queue *txq;
4454 struct mvpp2_txq_pcpu *txq_pcpu;
4455 unsigned int tx_todo = 0;
4456
4457 while (cause) {
4458 txq = mvpp2_get_tx_queue(port, cause);
4459 if (!txq)
4460 break;
4461
4462 txq_pcpu = this_cpu_ptr(txq->pcpu);
4463
4464 if (txq_pcpu->count) {
4465 mvpp2_txq_done(port, txq, txq_pcpu);
4466 tx_todo += txq_pcpu->count;
4467 }
4468
4469 cause &= ~(1 << txq->log_id);
4470 }
4471 return tx_todo;
4472}
4473
4463/* Rx/Tx queue initialization/cleanup methods */ 4474/* Rx/Tx queue initialization/cleanup methods */
4464 4475
4465/* Allocate and initialize descriptors for aggr TXQ */ 4476/* Allocate and initialize descriptors for aggr TXQ */
@@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
4649 txq_pcpu->tx_skb = kmalloc(txq_pcpu->size * 4660 txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
4650 sizeof(*txq_pcpu->tx_skb), 4661 sizeof(*txq_pcpu->tx_skb),
4651 GFP_KERNEL); 4662 GFP_KERNEL);
4652 if (!txq_pcpu->tx_skb) { 4663 if (!txq_pcpu->tx_skb)
4653 dma_free_coherent(port->dev->dev.parent, 4664 goto error;
4654 txq->size * MVPP2_DESC_ALIGNED_SIZE, 4665
4655 txq->descs, txq->descs_phys); 4666 txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
4656 return -ENOMEM; 4667 sizeof(dma_addr_t), GFP_KERNEL);
4657 } 4668 if (!txq_pcpu->tx_buffs)
4669 goto error;
4658 4670
4659 txq_pcpu->count = 0; 4671 txq_pcpu->count = 0;
4660 txq_pcpu->reserved_num = 0; 4672 txq_pcpu->reserved_num = 0;
@@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
4663 } 4675 }
4664 4676
4665 return 0; 4677 return 0;
4678
4679error:
4680 for_each_present_cpu(cpu) {
4681 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4682 kfree(txq_pcpu->tx_skb);
4683 kfree(txq_pcpu->tx_buffs);
4684 }
4685
4686 dma_free_coherent(port->dev->dev.parent,
4687 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4688 txq->descs, txq->descs_phys);
4689
4690 return -ENOMEM;
4666} 4691}
4667 4692
4668/* Free allocated TXQ resources */ 4693/* Free allocated TXQ resources */
@@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
4675 for_each_present_cpu(cpu) { 4700 for_each_present_cpu(cpu) {
4676 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 4701 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4677 kfree(txq_pcpu->tx_skb); 4702 kfree(txq_pcpu->tx_skb);
4703 kfree(txq_pcpu->tx_buffs);
4678 } 4704 }
4679 4705
4680 if (txq->descs) 4706 if (txq->descs)
@@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
4805 goto err_cleanup; 4831 goto err_cleanup;
4806 } 4832 }
4807 4833
4808 on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
4809 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 4834 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4810 return 0; 4835 return 0;
4811 4836
@@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev)
4887 } 4912 }
4888} 4913}
4889 4914
4915static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
4916{
4917 ktime_t interval;
4918
4919 if (!port_pcpu->timer_scheduled) {
4920 port_pcpu->timer_scheduled = true;
4921 interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
4922 hrtimer_start(&port_pcpu->tx_done_timer, interval,
4923 HRTIMER_MODE_REL_PINNED);
4924 }
4925}
4926
4927static void mvpp2_tx_proc_cb(unsigned long data)
4928{
4929 struct net_device *dev = (struct net_device *)data;
4930 struct mvpp2_port *port = netdev_priv(dev);
4931 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
4932 unsigned int tx_todo, cause;
4933
4934 if (!netif_running(dev))
4935 return;
4936 port_pcpu->timer_scheduled = false;
4937
4938 /* Process all the Tx queues */
4939 cause = (1 << txq_number) - 1;
4940 tx_todo = mvpp2_tx_done(port, cause);
4941
4942 /* Set the timer in case not all the packets were processed */
4943 if (tx_todo)
4944 mvpp2_timer_set(port_pcpu);
4945}
4946
4947static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
4948{
4949 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
4950 struct mvpp2_port_pcpu,
4951 tx_done_timer);
4952
4953 tasklet_schedule(&port_pcpu->tx_done_tasklet);
4954
4955 return HRTIMER_NORESTART;
4956}
4957
4890/* Main RX/TX processing routines */ 4958/* Main RX/TX processing routines */
4891 4959
4892/* Display more error info */ 4960/* Display more error info */
@@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5144 if (i == (skb_shinfo(skb)->nr_frags - 1)) { 5212 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5145 /* Last descriptor */ 5213 /* Last descriptor */
5146 tx_desc->command = MVPP2_TXD_L_DESC; 5214 tx_desc->command = MVPP2_TXD_L_DESC;
5147 mvpp2_txq_inc_put(txq_pcpu, skb); 5215 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5148 } else { 5216 } else {
5149 /* Descriptor in the middle: Not First, Not Last */ 5217 /* Descriptor in the middle: Not First, Not Last */
5150 tx_desc->command = 0; 5218 tx_desc->command = 0;
5151 mvpp2_txq_inc_put(txq_pcpu, NULL); 5219 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5152 } 5220 }
5153 } 5221 }
5154 5222
@@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5214 /* First and Last descriptor */ 5282 /* First and Last descriptor */
5215 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; 5283 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5216 tx_desc->command = tx_cmd; 5284 tx_desc->command = tx_cmd;
5217 mvpp2_txq_inc_put(txq_pcpu, skb); 5285 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5218 } else { 5286 } else {
5219 /* First but not Last */ 5287 /* First but not Last */
5220 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; 5288 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5221 tx_desc->command = tx_cmd; 5289 tx_desc->command = tx_cmd;
5222 mvpp2_txq_inc_put(txq_pcpu, NULL); 5290 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5223 5291
5224 /* Continue with other skb fragments */ 5292 /* Continue with other skb fragments */
5225 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { 5293 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -5255,6 +5323,17 @@ out:
5255 dev_kfree_skb_any(skb); 5323 dev_kfree_skb_any(skb);
5256 } 5324 }
5257 5325
5326 /* Finalize TX processing */
5327 if (txq_pcpu->count >= txq->done_pkts_coal)
5328 mvpp2_txq_done(port, txq, txq_pcpu);
5329
5330 /* Set the timer in case not all frags were processed */
5331 if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
5332 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5333
5334 mvpp2_timer_set(port_pcpu);
5335 }
5336
5258 return NETDEV_TX_OK; 5337 return NETDEV_TX_OK;
5259} 5338}
5260 5339
@@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5268 netdev_err(dev, "tx fifo underrun error\n"); 5347 netdev_err(dev, "tx fifo underrun error\n");
5269} 5348}
5270 5349
5271static void mvpp2_txq_done_percpu(void *arg) 5350static int mvpp2_poll(struct napi_struct *napi, int budget)
5272{ 5351{
5273 struct mvpp2_port *port = arg; 5352 u32 cause_rx_tx, cause_rx, cause_misc;
5274 u32 cause_rx_tx, cause_tx, cause_misc; 5353 int rx_done = 0;
5354 struct mvpp2_port *port = netdev_priv(napi->dev);
5275 5355
5276 /* Rx/Tx cause register 5356 /* Rx/Tx cause register
5277 * 5357 *
@@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg)
5285 */ 5365 */
5286 cause_rx_tx = mvpp2_read(port->priv, 5366 cause_rx_tx = mvpp2_read(port->priv,
5287 MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); 5367 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5288 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 5368 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5289 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; 5369 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5290 5370
5291 if (cause_misc) { 5371 if (cause_misc) {
@@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg)
5297 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); 5377 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5298 } 5378 }
5299 5379
5300 /* Release TX descriptors */
5301 if (cause_tx) {
5302 struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
5303 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5304
5305 if (txq_pcpu->count)
5306 mvpp2_txq_done(port, txq, txq_pcpu);
5307 }
5308}
5309
5310static int mvpp2_poll(struct napi_struct *napi, int budget)
5311{
5312 u32 cause_rx_tx, cause_rx;
5313 int rx_done = 0;
5314 struct mvpp2_port *port = netdev_priv(napi->dev);
5315
5316 on_each_cpu(mvpp2_txq_done_percpu, port, 1);
5317
5318 cause_rx_tx = mvpp2_read(port->priv,
5319 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5320 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; 5380 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5321 5381
5322 /* Process RX packets */ 5382 /* Process RX packets */
@@ -5561,6 +5621,8 @@ err_cleanup_rxqs:
5561static int mvpp2_stop(struct net_device *dev) 5621static int mvpp2_stop(struct net_device *dev)
5562{ 5622{
5563 struct mvpp2_port *port = netdev_priv(dev); 5623 struct mvpp2_port *port = netdev_priv(dev);
5624 struct mvpp2_port_pcpu *port_pcpu;
5625 int cpu;
5564 5626
5565 mvpp2_stop_dev(port); 5627 mvpp2_stop_dev(port);
5566 mvpp2_phy_disconnect(port); 5628 mvpp2_phy_disconnect(port);
@@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev)
5569 on_each_cpu(mvpp2_interrupts_mask, port, 1); 5631 on_each_cpu(mvpp2_interrupts_mask, port, 1);
5570 5632
5571 free_irq(port->irq, port); 5633 free_irq(port->irq, port);
5634 for_each_present_cpu(cpu) {
5635 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
5636
5637 hrtimer_cancel(&port_pcpu->tx_done_timer);
5638 port_pcpu->timer_scheduled = false;
5639 tasklet_kill(&port_pcpu->tx_done_tasklet);
5640 }
5572 mvpp2_cleanup_rxqs(port); 5641 mvpp2_cleanup_rxqs(port);
5573 mvpp2_cleanup_txqs(port); 5642 mvpp2_cleanup_txqs(port);
5574 5643
@@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5784 txq->done_pkts_coal = c->tx_max_coalesced_frames; 5853 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5785 } 5854 }
5786 5855
5787 on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
5788 return 0; 5856 return 0;
5789} 5857}
5790 5858
@@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6035{ 6103{
6036 struct device_node *phy_node; 6104 struct device_node *phy_node;
6037 struct mvpp2_port *port; 6105 struct mvpp2_port *port;
6106 struct mvpp2_port_pcpu *port_pcpu;
6038 struct net_device *dev; 6107 struct net_device *dev;
6039 struct resource *res; 6108 struct resource *res;
6040 const char *dt_mac_addr; 6109 const char *dt_mac_addr;
@@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6044 int features; 6113 int features;
6045 int phy_mode; 6114 int phy_mode;
6046 int priv_common_regs_num = 2; 6115 int priv_common_regs_num = 2;
6047 int err, i; 6116 int err, i, cpu;
6048 6117
6049 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number, 6118 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6050 rxq_number); 6119 rxq_number);
@@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6135 } 6204 }
6136 mvpp2_port_power_up(port); 6205 mvpp2_port_power_up(port);
6137 6206
6207 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6208 if (!port->pcpu) {
6209 err = -ENOMEM;
6210 goto err_free_txq_pcpu;
6211 }
6212
6213 for_each_present_cpu(cpu) {
6214 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6215
6216 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6217 HRTIMER_MODE_REL_PINNED);
6218 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6219 port_pcpu->timer_scheduled = false;
6220
6221 tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6222 (unsigned long)dev);
6223 }
6224
6138 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT); 6225 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6139 features = NETIF_F_SG | NETIF_F_IP_CSUM; 6226 features = NETIF_F_SG | NETIF_F_IP_CSUM;
6140 dev->features = features | NETIF_F_RXCSUM; 6227 dev->features = features | NETIF_F_RXCSUM;
@@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6144 err = register_netdev(dev); 6231 err = register_netdev(dev);
6145 if (err < 0) { 6232 if (err < 0) {
6146 dev_err(&pdev->dev, "failed to register netdev\n"); 6233 dev_err(&pdev->dev, "failed to register netdev\n");
6147 goto err_free_txq_pcpu; 6234 goto err_free_port_pcpu;
6148 } 6235 }
6149 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); 6236 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6150 6237
@@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6153 priv->port_list[id] = port; 6240 priv->port_list[id] = port;
6154 return 0; 6241 return 0;
6155 6242
6243err_free_port_pcpu:
6244 free_percpu(port->pcpu);
6156err_free_txq_pcpu: 6245err_free_txq_pcpu:
6157 for (i = 0; i < txq_number; i++) 6246 for (i = 0; i < txq_number; i++)
6158 free_percpu(port->txqs[i]->pcpu); 6247 free_percpu(port->txqs[i]->pcpu);
@@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
6171 int i; 6260 int i;
6172 6261
6173 unregister_netdev(port->dev); 6262 unregister_netdev(port->dev);
6263 free_percpu(port->pcpu);
6174 free_percpu(port->stats); 6264 free_percpu(port->stats);
6175 for (i = 0; i < txq_number; i++) 6265 for (i = 0; i < txq_number; i++)
6176 free_percpu(port->txqs[i]->pcpu); 6266 free_percpu(port->txqs[i]->pcpu);
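
The mvpp2 rework stops reaping TX descriptors from the RX poll path via on_each_cpu() and instead defers completion to a pinned per-CPU hrtimer that kicks a tasklet, re-arming only while packets remain. A hedged kernel-context sketch of that deferral pattern, using hypothetical example_* names; the 1 ms period matches MVPP2_TXDONE_HRTIMER_PERIOD_NS, and the real driver drains descriptors where the tx_todo placeholder sits:

/* Hedged sketch of the hrtimer-plus-tasklet deferral pattern. */
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>

struct example_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	struct tasklet_struct tx_done_tasklet;
};

/* Timer context: do no real work, just punt to the tasklet. */
static enum hrtimer_restart example_timer_cb(struct hrtimer *t)
{
	struct example_pcpu *pc =
		container_of(t, struct example_pcpu, tx_done_timer);

	tasklet_schedule(&pc->tx_done_tasklet);
	return HRTIMER_NORESTART;
}

/* Tasklet context: reap completions, re-arm only if work remains. */
static void example_tasklet_fn(unsigned long data)
{
	struct example_pcpu *pc = (struct example_pcpu *)data;
	unsigned int tx_todo = 0;	/* returned by the real reap step */

	pc->timer_scheduled = false;
	if (tx_todo) {
		pc->timer_scheduled = true;
		hrtimer_start(&pc->tx_done_timer,
			      ktime_set(0, 1000000UL),	/* 1 ms */
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void example_init(struct example_pcpu *pc)
{
	hrtimer_init(&pc->tx_done_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED);
	pc->tx_done_timer.function = example_timer_cb;
	pc->timer_scheduled = false;
	tasklet_init(&pc->tx_done_tasklet, example_tasklet_fn,
		     (unsigned long)pc);
}
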
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index afad529838de..06e3e1e54c35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
391 /* disable cmdif checksum */ 391 /* disable cmdif checksum */
392 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); 392 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
393 393
394 MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
395
394 err = set_caps(dev, set_ctx, set_sz); 396 err = set_caps(dev, set_ctx, set_sz);
395 397
396query_ex: 398query_ex:
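
The mlx5 field log_uar_page_sz is the base-2 log of the UAR page size in 4 KiB units, so writing PAGE_SHIFT - 12 reports the kernel's real page size (0 on 4 KiB kernels, 4 on the 64 KiB pages common on arm64/ppc64). A trivial standalone check of that encoding:

/* Standalone check: log2(PAGE_SIZE) - log2(4096). */
#include <stdio.h>

int main(void)
{
	int page_shifts[] = { 12, 14, 16 };	/* 4K, 16K, 64K kernels */
	int i;

	for (i = 0; i < 3; i++)
		printf("PAGE_SHIFT=%d -> log_uar_page_sz=%d\n",
		       page_shifts[i], page_shifts[i] - 12);
	return 0;
}
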
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f78909a00f15..09d2e16fd6b0 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
952 952
953 sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, 953 sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
954 tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE); 954 tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
955 err = dma_mapping_error(adapter->dev, 955 if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
956 sg_dma_address(&tx_ctl->sg)); 956 err = -ENOMEM;
957 if (err) {
958 sg_dma_address(&tx_ctl->sg) = 0; 957 sg_dma_address(&tx_ctl->sg) = 0;
959 goto err; 958 goto err;
960 } 959 }
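
dma_mapping_error() only reports whether the mapping failed; it does not hand back an errno, which is why the patch assigns -ENOMEM explicitly instead of storing the helper's return value in err. A kernel-context sketch of the idiom, with a hypothetical example_map() wrapper:

/* Kernel-context sketch: the caller picks the errno itself. */
#include <linux/dma-mapping.h>

static int example_map(struct device *dev, void *buf, size_t len,
		       dma_addr_t *out)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;	/* dma itself carries no error code */
	*out = dma;
	return 0;
}
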
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3df51faf18ae..f790f61ea78a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4875,10 +4875,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4875 case RTL_GIGA_MAC_VER_46: 4875 case RTL_GIGA_MAC_VER_46:
4876 case RTL_GIGA_MAC_VER_47: 4876 case RTL_GIGA_MAC_VER_47:
4877 case RTL_GIGA_MAC_VER_48: 4877 case RTL_GIGA_MAC_VER_48:
4878 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
4879 break;
4878 case RTL_GIGA_MAC_VER_49: 4880 case RTL_GIGA_MAC_VER_49:
4879 case RTL_GIGA_MAC_VER_50: 4881 case RTL_GIGA_MAC_VER_50:
4880 case RTL_GIGA_MAC_VER_51: 4882 case RTL_GIGA_MAC_VER_51:
4881 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); 4883 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
4882 break; 4884 break;
4883 default: 4885 default:
4884 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); 4886 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2d8578cade03..2e7f9a2834be 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -4821,6 +4821,7 @@ static void rocker_remove_ports(const struct rocker *rocker)
4821 rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 4821 rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
4822 ROCKER_OP_FLAG_REMOVE); 4822 ROCKER_OP_FLAG_REMOVE);
4823 unregister_netdev(rocker_port->dev); 4823 unregister_netdev(rocker_port->dev);
4824 free_netdev(rocker_port->dev);
4824 } 4825 }
4825 kfree(rocker->ports); 4826 kfree(rocker->ports);
4826} 4827}
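
unregister_netdev() detaches the device but does not free the memory allocated by alloc_etherdev(); without the added free_netdev(), every removed rocker port leaked its net_device. A kernel-context sketch of the required pairing:

/* Kernel-context sketch: every unregister needs a matching free. */
#include <linux/netdevice.h>

static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);	/* stop and unlink the device */
	free_netdev(dev);	/* actually release the allocation */
}
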
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 7e3129e7f143..f0e4bb4e3ec5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -42,7 +42,7 @@
42#define NSS_COMMON_CLK_DIV_MASK 0x7f 42#define NSS_COMMON_CLK_DIV_MASK 0x7f
43 43
44#define NSS_COMMON_CLK_SRC_CTRL 0x14 44#define NSS_COMMON_CLK_SRC_CTRL 0x14
45#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x) 45#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (x)
46/* Mode is coded on 1 bit but is different depending on the MAC ID: 46/* Mode is coded on 1 bit but is different depending on the MAC ID:
47 * MAC0: QSGMII=0 RGMII=1 47 * MAC0: QSGMII=0 RGMII=1
48 * MAC1: QSGMII=0 SGMII=0 RGMII=1 48 * MAC1: QSGMII=0 SGMII=0 RGMII=1
@@ -291,7 +291,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
291 291
292 /* Configure the clock src according to the mode */ 292 /* Configure the clock src according to the mode */
293 regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val); 293 regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
294 val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); 294 val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
295 switch (gmac->phy_mode) { 295 switch (gmac->phy_mode) {
296 case PHY_INTERFACE_MODE_RGMII: 296 case PHY_INTERFACE_MODE_RGMII:
297 val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) << 297 val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
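
The old NSS_COMMON_CLK_SRC_CTRL_OFFSET() macro already returned a bit mask, yet the mode-setting path shifted by it again, so the clear and the set operated on different bits for every MAC ID. A standalone demo of the double-shift mismatch, with illustrative register contents:

/* Standalone demo of the double-shift bug the patch fixes. */
#include <stdio.h>

#define OLD_OFFSET(x)	(1 << (x))	/* a mask, not a bit number */
#define NEW_OFFSET(x)	(x)		/* a plain bit number */

int main(void)
{
	unsigned int id = 1, mode = 1;	/* MAC1, RGMII-style mode bit */
	unsigned int old_val = 0xff, new_val = 0xff;

	/* Old code: cleared bit `id` but set bit (1 << id): mismatch. */
	old_val &= ~OLD_OFFSET(id);		/* clears bit 1 */
	old_val |= mode << OLD_OFFSET(id);	/* sets bit 2, not bit 1 */

	/* Fixed code: clear and set agree on the same bit. */
	new_val &= ~(1u << NEW_OFFSET(id));
	new_val |= mode << NEW_OFFSET(id);

	printf("old=%#x new=%#x\n", old_val, new_val);	/* 0xfd vs 0xff */
	return 0;
}

After the fix, the macro yields a bit number, the clear becomes an explicit `1 << offset`, and the mode value lands on the bit the clear just vacated.
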
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index a8a730641bbb..bb1bb72121c0 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -85,7 +85,6 @@ struct netcp_intf {
85 struct list_head rxhook_list_head; 85 struct list_head rxhook_list_head;
86 unsigned int rx_queue_id; 86 unsigned int rx_queue_id;
87 void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN]; 87 void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
88 u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
89 struct napi_struct rx_napi; 88 struct napi_struct rx_napi;
90 struct napi_struct tx_napi; 89 struct napi_struct tx_napi;
91 90
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 9749dfd78c43..4755838c6137 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -34,6 +34,7 @@
34#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD) 34#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
35#define NETCP_NAPI_WEIGHT 64 35#define NETCP_NAPI_WEIGHT 64
36#define NETCP_TX_TIMEOUT (5 * HZ) 36#define NETCP_TX_TIMEOUT (5 * HZ)
37#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN)
37#define NETCP_MIN_PACKET_SIZE ETH_ZLEN 38#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
38#define NETCP_MAX_MCAST_ADDR 16 39#define NETCP_MAX_MCAST_ADDR 16
39 40
@@ -804,30 +805,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
804 if (likely(fdq == 0)) { 805 if (likely(fdq == 0)) {
805 unsigned int primary_buf_len; 806 unsigned int primary_buf_len;
806 /* Allocate a primary receive queue entry */ 807 /* Allocate a primary receive queue entry */
807 buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET; 808 buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
808 primary_buf_len = SKB_DATA_ALIGN(buf_len) + 809 primary_buf_len = SKB_DATA_ALIGN(buf_len) +
809 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 810 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
810 811
811 if (primary_buf_len <= PAGE_SIZE) { 812 bufptr = netdev_alloc_frag(primary_buf_len);
812 bufptr = netdev_alloc_frag(primary_buf_len); 813 pad[1] = primary_buf_len;
813 pad[1] = primary_buf_len;
814 } else {
815 bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
816 GFP_DMA32 | __GFP_COLD);
817 pad[1] = 0;
818 }
819 814
820 if (unlikely(!bufptr)) { 815 if (unlikely(!bufptr)) {
821 dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n"); 816 dev_warn_ratelimited(netcp->ndev_dev,
817 "Primary RX buffer alloc failed\n");
822 goto fail; 818 goto fail;
823 } 819 }
824 dma = dma_map_single(netcp->dev, bufptr, buf_len, 820 dma = dma_map_single(netcp->dev, bufptr, buf_len,
825 DMA_TO_DEVICE); 821 DMA_TO_DEVICE);
822 if (unlikely(dma_mapping_error(netcp->dev, dma)))
823 goto fail;
824
826 pad[0] = (u32)bufptr; 825 pad[0] = (u32)bufptr;
827 826
828 } else { 827 } else {
829 /* Allocate a secondary receive queue entry */ 828 /* Allocate a secondary receive queue entry */
830 page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD); 829 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
831 if (unlikely(!page)) { 830 if (unlikely(!page)) {
832 dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n"); 831 dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
833 goto fail; 832 goto fail;
@@ -1010,7 +1009,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
1010 1009
1011 /* Map the linear buffer */ 1010 /* Map the linear buffer */
1012 dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE); 1011 dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
1013 if (unlikely(!dma_addr)) { 1012 if (unlikely(dma_mapping_error(dev, dma_addr))) {
1014 dev_err(netcp->ndev_dev, "Failed to map skb buffer\n"); 1013 dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
1015 return NULL; 1014 return NULL;
1016 } 1015 }
@@ -1546,8 +1545,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
1546 knav_queue_disable_notify(netcp->rx_queue); 1545 knav_queue_disable_notify(netcp->rx_queue);
1547 1546
1548 /* open Rx FDQs */ 1547 /* open Rx FDQs */
1549 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && 1548 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
1550 netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) { 1549 ++i) {
1551 snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i); 1550 snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
1552 netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0); 1551 netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
1553 if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) { 1552 if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
@@ -1941,14 +1940,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
1941 netcp->rx_queue_depths[0] = 128; 1940 netcp->rx_queue_depths[0] = 128;
1942 } 1941 }
1943 1942
1944 ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
1945 netcp->rx_buffer_sizes,
1946 KNAV_DMA_FDQ_PER_CHAN);
1947 if (ret) {
1948 dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
1949 netcp->rx_buffer_sizes[0] = 1536;
1950 }
1951
1952 ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2); 1943 ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
1953 if (ret < 0) { 1944 if (ret < 0) {
1954 dev_err(dev, "missing \"rx-pool\" parameter\n"); 1945 dev_err(dev, "missing \"rx-pool\" parameter\n");
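
With the rx-buffer-size DT property gone, the primary RX ring always sizes its buffers for a full Ethernet frame plus FCS and the SOP offset, which keeps primary_buf_len within a page and lets the page-or-kmalloc fallback disappear. A standalone sketch of that sizing arithmetic; NET_SKB_PAD, SKB_DATA_ALIGN, and sizeof(struct skb_shared_info) are approximated with illustrative constants:

/* Standalone sketch of the fixed primary RX buffer sizing. */
#include <stdio.h>

#define ETH_FRAME_LEN	1514
#define ETH_FCS_LEN	4
#define PACKET_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN)
#define SOP_OFFSET	(2 + 64)		/* NET_IP_ALIGN + NET_SKB_PAD */
#define ALIGN_UP(x)	(((x) + 63) & ~63u)	/* SKB_DATA_ALIGN stand-in */

int main(void)
{
	unsigned int buf_len = PACKET_SIZE + SOP_OFFSET;
	unsigned int shinfo = 320;	/* illustrative shared-info size */
	unsigned int primary = ALIGN_UP(buf_len) + ALIGN_UP(shinfo);

	printf("buf_len=%u primary_buf_len=%u\n", buf_len, primary);
	return 0;
}
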
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 2ffbf13471d0..216bfd350169 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -728,11 +728,12 @@ static int mkiss_open(struct tty_struct *tty)
728 dev->type = ARPHRD_AX25; 728 dev->type = ARPHRD_AX25;
729 729
730 /* Perform the low-level AX25 initialization. */ 730 /* Perform the low-level AX25 initialization. */
731 if ((err = ax_open(ax->dev))) { 731 err = ax_open(ax->dev);
732 if (err)
732 goto out_free_netdev; 733 goto out_free_netdev;
733 }
734 734
735 if (register_netdev(dev)) 735 err = register_netdev(dev);
736 if (err)
736 goto out_free_buffers; 737 goto out_free_buffers;
737 738
738 /* after register_netdev() - because else printk smashes the kernel */ 739 /* after register_netdev() - because else printk smashes the kernel */
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 3cc316cb7e6b..d8757bf9ad75 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -102,6 +102,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
102 102
103 netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len); 103 netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
104 104
105 if (len < 0) {
106 ndev->stats.rx_errors++;
107 ndev->stats.rx_length_errors++;
108 goto enqueue_again;
109 }
110
105 skb_put(skb, len); 111 skb_put(skb, len);
106 skb->protocol = eth_type_trans(skb, ndev); 112 skb->protocol = eth_type_trans(skb, ndev);
107 skb->ip_summed = CHECKSUM_NONE; 113 skb->ip_summed = CHECKSUM_NONE;
@@ -121,6 +127,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
121 return; 127 return;
122 } 128 }
123 129
130enqueue_again:
124 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); 131 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
125 if (rc) { 132 if (rc) {
126 dev_kfree_skb(skb); 133 dev_kfree_skb(skb);
@@ -184,7 +191,7 @@ static int ntb_netdev_open(struct net_device *ndev)
184 191
185 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, 192 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
186 ndev->mtu + ETH_HLEN); 193 ndev->mtu + ETH_HLEN);
187 if (rc == -EINVAL) { 194 if (rc) {
188 dev_kfree_skb(skb); 195 dev_kfree_skb(skb);
189 goto err; 196 goto err;
190 } 197 }
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b2197b506acb..1e1fbb049ec6 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -811,6 +811,7 @@ void phy_state_machine(struct work_struct *work)
811 bool needs_aneg = false, do_suspend = false; 811 bool needs_aneg = false, do_suspend = false;
812 enum phy_state old_state; 812 enum phy_state old_state;
813 int err = 0; 813 int err = 0;
814 int old_link;
814 815
815 mutex_lock(&phydev->lock); 816 mutex_lock(&phydev->lock);
816 817
@@ -896,11 +897,18 @@ void phy_state_machine(struct work_struct *work)
896 phydev->adjust_link(phydev->attached_dev); 897 phydev->adjust_link(phydev->attached_dev);
897 break; 898 break;
898 case PHY_RUNNING: 899 case PHY_RUNNING:
899 /* Only register a CHANGE if we are 900 /* Only register a CHANGE if we are polling or ignoring
900 * polling or ignoring interrupts 901 * interrupts and the link changed since the last check.
901 */ 902 */
902 if (!phy_interrupt_is_valid(phydev)) 903 if (!phy_interrupt_is_valid(phydev)) {
903 phydev->state = PHY_CHANGELINK; 904 old_link = phydev->link;
905 err = phy_read_status(phydev);
906 if (err)
907 break;
908
909 if (old_link != phydev->link)
910 phydev->state = PHY_CHANGELINK;
911 }
904 break; 912 break;
905 case PHY_CHANGELINK: 913 case PHY_CHANGELINK:
906 err = phy_read_status(phydev); 914 err = phy_read_status(phydev);
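
In polling mode, the PHY_RUNNING state now re-reads the link on every tick and moves to PHY_CHANGELINK only when the link actually flipped, so a missed transition is eventually noticed without renegotiating on every poll. A standalone toy model of that guard (not phylib itself):

/* Toy model of the PHY_RUNNING polling guard. */
#include <stdio.h>
#include <stdbool.h>

enum toy_state { TOY_RUNNING, TOY_CHANGELINK };

static enum toy_state poll_tick(bool *cached_link, bool hw_link)
{
	bool old = *cached_link;

	*cached_link = hw_link;	/* what phy_read_status() refreshes */
	return old != hw_link ? TOY_CHANGELINK : TOY_RUNNING;
}

int main(void)
{
	bool link = true;

	printf("steady: %d\n", poll_tick(&link, true));	/* stays 0 */
	printf("pulled: %d\n", poll_tick(&link, false));	/* flags 1 */
	return 0;
}
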
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c0f6479e19d4..70b08958763a 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -91,19 +91,18 @@ static int lan911x_config_init(struct phy_device *phydev)
91} 91}
92 92
93/* 93/*
 94 * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each 94 * The LAN87xx occasionally fails to set the ENERGYON bit when an Ethernet
 95 * other in order to set the ENERGYON bit and exit EDPD mode. If a link partner 95 * cable is plugged in while it is in Energy Detect Power-Down mode, which
 96 * does not send the pulses within this interval, the PHY will remain powered 96 * makes cable-plug detection unreliable.
 97 * down. 97 * This workaround disables Energy Detect Power-Down mode and waits for a
 98 * 98 * response to link pulses to detect a plugged-in Ethernet cable.
 99 * This workaround will manually toggle the PHY on/off upon calls to read_status 99 * Energy Detect Power-Down mode is re-enabled at the end of the procedure to
100 * in order to generate link test pulses if the link is down. If a link partner 100 * save approximately 220 mW of power while the cable is unplugged.
101 * is present, it will respond to the pulses, which will cause the ENERGYON bit
102 * to be set and will cause the EDPD mode to be exited.
103 */ 101 */
104static int lan87xx_read_status(struct phy_device *phydev) 102static int lan87xx_read_status(struct phy_device *phydev)
105{ 103{
106 int err = genphy_read_status(phydev); 104 int err = genphy_read_status(phydev);
105 int i;
107 106
108 if (!phydev->link) { 107 if (!phydev->link) {
109 /* Disable EDPD to wake up PHY */ 108 /* Disable EDPD to wake up PHY */
@@ -116,8 +115,16 @@ static int lan87xx_read_status(struct phy_device *phydev)
 		if (rc < 0)
 			return rc;
 
-		/* Sleep 64 ms to allow ~5 link test pulses to be sent */
-		msleep(64);
+		/* Wait max 640 ms to detect energy */
+		for (i = 0; i < 64; i++) {
+			/* Sleep to allow link test pulses to be sent */
+			msleep(10);
+			rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+			if (rc < 0)
+				return rc;
+			if (rc & MII_LAN83C185_ENERGYON)
+				break;
+		}
 
 		/* Re-enable EDPD */
 		rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -191,7 +198,7 @@ static struct phy_driver smsc_phy_driver[] = {
 
 	/* basic functions */
 	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
+	.read_status	= lan87xx_read_status,
 	.config_init	= smsc_phy_config_init,
 	.soft_reset	= smsc_phy_reset,
 
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 9d15566521a7..fa8f5046afe9 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -269,9 +269,9 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
 static void ppp_ccp_closed(struct ppp *ppp);
 static struct compressor *find_compressor(int type);
 static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+					struct file *file, int *retp);
 static void init_ppp_file(struct ppp_file *pf, int kind);
-static void ppp_shutdown_interface(struct ppp *ppp);
 static void ppp_destroy_interface(struct ppp *ppp);
 static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
 static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
@@ -392,8 +392,10 @@ static int ppp_release(struct inode *unused, struct file *file)
 	file->private_data = NULL;
 	if (pf->kind == INTERFACE) {
 		ppp = PF_TO_PPP(pf);
+		rtnl_lock();
 		if (file == ppp->owner)
-			ppp_shutdown_interface(ppp);
+			unregister_netdevice(ppp->dev);
+		rtnl_unlock();
 	}
 	if (atomic_dec_and_test(&pf->refcnt)) {
 		switch (pf->kind) {
@@ -593,8 +595,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	mutex_lock(&ppp_mutex);
 	if (pf->kind == INTERFACE) {
 		ppp = PF_TO_PPP(pf);
+		rtnl_lock();
 		if (file == ppp->owner)
-			ppp_shutdown_interface(ppp);
+			unregister_netdevice(ppp->dev);
+		rtnl_unlock();
 	}
 	if (atomic_long_read(&file->f_count) < 2) {
 		ppp_release(NULL, file);
@@ -838,11 +842,10 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
 		/* Create a new ppp unit */
 		if (get_user(unit, p))
 			break;
-		ppp = ppp_create_interface(net, unit, &err);
+		ppp = ppp_create_interface(net, unit, file, &err);
 		if (!ppp)
 			break;
 		file->private_data = &ppp->file;
-		ppp->owner = file;
 		err = -EFAULT;
 		if (put_user(ppp->file.index, p))
 			break;
@@ -916,6 +919,16 @@ static __net_init int ppp_init_net(struct net *net)
 static __net_exit void ppp_exit_net(struct net *net)
 {
 	struct ppp_net *pn = net_generic(net, ppp_net_id);
+	struct ppp *ppp;
+	LIST_HEAD(list);
+	int id;
+
+	rtnl_lock();
+	idr_for_each_entry(&pn->units_idr, ppp, id)
+		unregister_netdevice_queue(ppp->dev, &list);
+
+	unregister_netdevice_many(&list);
+	rtnl_unlock();
 
 	idr_destroy(&pn->units_idr);
 }
@@ -1088,8 +1101,28 @@ static int ppp_dev_init(struct net_device *dev)
 	return 0;
 }
 
+static void ppp_dev_uninit(struct net_device *dev)
+{
+	struct ppp *ppp = netdev_priv(dev);
+	struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+
+	ppp_lock(ppp);
+	ppp->closing = 1;
+	ppp_unlock(ppp);
+
+	mutex_lock(&pn->all_ppp_mutex);
+	unit_put(&pn->units_idr, ppp->file.index);
+	mutex_unlock(&pn->all_ppp_mutex);
+
+	ppp->owner = NULL;
+
+	ppp->file.dead = 1;
+	wake_up_interruptible(&ppp->file.rwait);
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
 	.ndo_init	 = ppp_dev_init,
+	.ndo_uninit	 = ppp_dev_uninit,
 	.ndo_start_xmit  = ppp_start_xmit,
 	.ndo_do_ioctl    = ppp_net_ioctl,
 	.ndo_get_stats64 = ppp_get_stats64,
@@ -2667,8 +2700,8 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
  * or if there is already a unit with the requested number.
  * unit == -1 means allocate a new number.
  */
-static struct ppp *
-ppp_create_interface(struct net *net, int unit, int *retp)
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+					struct file *file, int *retp)
 {
 	struct ppp *ppp;
 	struct ppp_net *pn;
@@ -2688,6 +2721,7 @@ ppp_create_interface(struct net *net, int unit, int *retp)
 	ppp->mru = PPP_MRU;
 	init_ppp_file(&ppp->file, INTERFACE);
 	ppp->file.hdrlen = PPP_HDRLEN - 2;	/* don't count proto bytes */
+	ppp->owner = file;
 	for (i = 0; i < NUM_NP; ++i)
 		ppp->npmode[i] = NPMODE_PASS;
 	INIT_LIST_HEAD(&ppp->channels);
@@ -2776,34 +2810,6 @@ init_ppp_file(struct ppp_file *pf, int kind)
 }
 
 /*
- * Take down a ppp interface unit - called when the owning file
- * (the one that created the unit) is closed or detached.
- */
-static void ppp_shutdown_interface(struct ppp *ppp)
-{
-	struct ppp_net *pn;
-
-	pn = ppp_pernet(ppp->ppp_net);
-	mutex_lock(&pn->all_ppp_mutex);
-
-	/* This will call dev_close() for us. */
-	ppp_lock(ppp);
-	if (!ppp->closing) {
-		ppp->closing = 1;
-		ppp_unlock(ppp);
-		unregister_netdev(ppp->dev);
-		unit_put(&pn->units_idr, ppp->file.index);
-	} else
-		ppp_unlock(ppp);
-
-	ppp->file.dead = 1;
-	ppp->owner = NULL;
-	wake_up_interruptible(&ppp->file.rwait);
-
-	mutex_unlock(&pn->all_ppp_mutex);
-}
-
-/*
  * Free the memory used by a ppp unit.  This is only called once
  * there are no channels connected to the unit and no file structs
  * that reference the unit.
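
Two things fall out of the ppp_generic rework above: teardown now lives in a
.ndo_uninit hook, so the explicit unregister_netdevice() calls and the batched
netns-exit path (idr_for_each_entry() feeding unregister_netdevice_queue() and
one unregister_netdevice_many()) all funnel into the same per-device cleanup;
and ppp->owner is assigned inside ppp_create_interface() before the device is
registered, closing the window where a freshly created unit had no owner. A
minimal sketch of the ndo_uninit pattern, using hypothetical demo_* names:

	#include <linux/netdevice.h>

	struct demo_priv {
		int id;		/* hypothetical per-device resource handle */
	};

	/* Runs once for every unregister path -- explicit unregister_netdevice()
	 * and batched netns teardown alike -- so cleanup cannot be skipped or
	 * run twice.
	 */
	static void demo_dev_uninit(struct net_device *dev)
	{
		struct demo_priv *priv = netdev_priv(dev);

		priv->id = -1;	/* release resources keyed by priv->id here */
	}

	static const struct net_device_ops demo_netdev_ops = {
		.ndo_uninit = demo_dev_uninit,
	};
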
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 9d43460ce3c7..64a60afbe50c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -785,6 +785,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x413c, 0x81a4, 8)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81a8, 8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81a9, 8)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 	{QMI_FIXED_INTF(0x03f0, 0x581d, 4)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
 
 	/* 4. Gobi 1000 devices */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7fbca37a1adf..237f8e5e493d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev)
 	/* Do we support "hardware" checksums? */
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
 		/* This opens up the world of extra features. */
-		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
 		if (csum)
-			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
 
 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
 			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 7193b7304fdd..848ea6a399f2 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma)
 	chan->netdev->base_addr = chan->cosa->datareg;
 	chan->netdev->irq = chan->cosa->irq;
 	chan->netdev->dma = chan->cosa->dma;
-	if (register_hdlc_device(chan->netdev)) {
+	err = register_hdlc_device(chan->netdev);
+	if (err) {
 		netdev_warn(chan->netdev,
 			    "register_hdlc_device() failed\n");
 		free_netdev(chan->netdev);
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 25d1cbd34306..b2f0d245bcf3 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
 	switch (phy->rev) {
 	case 6:
 	case 5:
-		if (sprom->fem.ghz5.extpa_gain == 3)
+		if (sprom->fem.ghz2.extpa_gain == 3)
 			return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
 		/* fall through */
 	case 4:
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 5000bfcded61..5514ad6d4e54 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1023,7 +1023,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
 	cmd->scan_priority =
 		iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 
-	if (iwl_mvm_scan_total_iterations(params) == 0)
+	if (iwl_mvm_scan_total_iterations(params) == 1)
 		cmd->ooc_priority =
 			iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 	else
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 6203c4ad9bba..9e144e71da0b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
 	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
 		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
 				  APMG_PCIDEV_STT_VAL_WAKE_ME);
-	else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+	else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+		iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+			    CSR_RESET_LINK_PWR_MGMT_DISABLED);
 		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
 			    CSR_HW_IF_CONFIG_REG_PREPARE |
 			    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+		mdelay(1);
+		iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+			      CSR_RESET_LINK_PWR_MGMT_DISABLED);
+	}
 	mdelay(5);
 }
 
@@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 	if (ret >= 0)
 		return 0;
 
+	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
+	msleep(1);
+
 	for (iter = 0; iter < 10; iter++) {
 		/* If HW is not ready, prepare the conditions to check again */
 		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
 		do {
 			ret = iwl_pcie_set_hw_ready(trans);
-			if (ret >= 0)
-				return 0;
+			if (ret >= 0) {
+				ret = 0;
+				goto out;
+			}
 
 			usleep_range(200, 1000);
 			t += 200;
@@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
 	IWL_ERR(trans, "Couldn't prepare the card\n");
 
+out:
+	iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+		      CSR_RESET_LINK_PWR_MGMT_DISABLED);
+
 	return ret;
 }
 
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 2b86c2135de3..607acb53c847 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1875,8 +1875,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	/* start timer if queue currently empty */
 	if (q->read_ptr == q->write_ptr) {
-		if (txq->wd_timeout)
-			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+		if (txq->wd_timeout) {
+			/*
+			 * If the TXQ is active, then set the timer, if not,
+			 * set the timer in remainder so that the timer will
+			 * be armed with the right value when the station will
+			 * wake up.
+			 */
+			if (!txq->frozen)
+				mod_timer(&txq->stuck_timer,
+					  jiffies + txq->wd_timeout);
+			else
+				txq->frozen_expiry_remainder = txq->wd_timeout;
+		}
 		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
 		iwl_trans_pcie_ref(trans);
 	}
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index b6cc9ff47fc2..1c6788aecc62 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
 		(struct rsi_91x_sdiodev *)adapter->rsi_dev;
 	u32 len;
 	u32 num_blocks;
+	const u8 *fw;
 	const struct firmware *fw_entry = NULL;
 	u32 block_size = dev->tx_blk_size;
 	int status = 0;
@@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
 		return status;
 	}
 
+	/* Copy firmware into DMA-accessible memory */
+	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+	if (!fw)
+		return -ENOMEM;
 	len = fw_entry->size;
 
 	if (len % 4)
@@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
 	rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
 	rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
 
-	status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
+	status = rsi_copy_to_card(common, fw, len, num_blocks);
+	kfree(fw);
 	release_firmware(fw_entry);
 	return status;
 }
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
index 1106ce76707e..30c2cf7fa93b 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
 		return status;
 	}
 
+	/* Copy firmware into DMA-accessible memory */
 	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+	if (!fw)
+		return -ENOMEM;
 	len = fw_entry->size;
 
 	if (len % 4)
@@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
 	rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
 
 	status = rsi_copy_to_card(common, fw, len, num_blocks);
+	kfree(fw);
 	release_firmware(fw_entry);
 	return status;
 }
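
Both rsi load paths now duplicate the firmware image with kmemdup() before
handing it to the block-transfer routine: the buffer behind fw_entry->data is
not guaranteed to be suitable for DMA, so a kmalloc'd copy is made and freed
once the transfer finishes. A sketch of that pattern, with copy_to_card() as a
hypothetical stand-in for rsi_copy_to_card():

	#include <linux/firmware.h>
	#include <linux/slab.h>

	static int copy_to_card(const u8 *buf, size_t len);	/* hypothetical */

	/* Copy the firmware into DMA-safe kmalloc memory for the transfer. */
	static int load_firmware_dma_safe(const struct firmware *fw_entry)
	{
		u8 *fw;
		int status;

		fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
		if (!fw)
			return -ENOMEM;

		status = copy_to_card(fw, fw_entry->size);
		kfree(fw);		/* drop the copy regardless of outcome */
		return status;
	}
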
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 3b3a88b53b11..585d0883c7e5 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+	struct rtl_tcb_desc tcb_desc;
 
-	if (skb)
-		rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL);
+	if (skb) {
+		memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+		rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
+	}
 }
 
 static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index 1017f02d7bf7..7bf88d9dcdc3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
 module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
 module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
 		   bool, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1a83e190fc15..28577a31549d 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
 {
 	atomic_dec(&queue->inflight_packets);
+
+	/* Wake the dealloc thread _after_ decrementing inflight_packets so
+	 * that if kthread_stop() has already been called, the dealloc thread
+	 * does not wait forever with nothing to wake it.
+	 */
+	wake_up(&queue->dealloc_wq);
 }
 
 int xenvif_schedulable(struct xenvif *vif)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7d50711476fe..3f44b522b831 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
 							struct sk_buff *skb,
 							struct xen_netif_tx_request *txp,
-							struct gnttab_map_grant_ref *gop)
+							struct gnttab_map_grant_ref *gop,
+							unsigned int frag_overflow,
+							struct sk_buff *nskb)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
 	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 	int start;
 	pending_ring_idx_t index;
-	unsigned int nr_slots, frag_overflow = 0;
+	unsigned int nr_slots;
 
-	/* At this point shinfo->nr_frags is in fact the number of
-	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
-	 */
-	if (shinfo->nr_frags > MAX_SKB_FRAGS) {
-		frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
-		BUG_ON(frag_overflow > MAX_SKB_FRAGS);
-		shinfo->nr_frags = MAX_SKB_FRAGS;
-	}
 	nr_slots = shinfo->nr_frags;
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
 	}
 
 	if (frag_overflow) {
-		struct sk_buff *nskb = xenvif_alloc_skb(0);
-		if (unlikely(nskb == NULL)) {
-			if (net_ratelimit())
-				netdev_err(queue->vif->dev,
-					   "Can't allocate the frag_list skb.\n");
-			return NULL;
-		}
 
 		shinfo = skb_shinfo(nskb);
 		frags = shinfo->frags;
@@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 						     unsigned *copy_ops,
 						     unsigned *map_ops)
 {
-	struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
-	struct sk_buff *skb;
+	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+	struct sk_buff *skb, *nskb;
 	int ret;
+	unsigned int frag_overflow;
 
 	while (skb_queue_len(&queue->tx_queue) < budget) {
 		struct xen_netif_tx_request txreq;
@@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			break;
 		}
 
+		skb_shinfo(skb)->nr_frags = ret;
+		if (data_len < txreq.size)
+			skb_shinfo(skb)->nr_frags++;
+		/* At this point shinfo->nr_frags is in fact the number of
+		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+		 */
+		frag_overflow = 0;
+		nskb = NULL;
+		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+			nskb = xenvif_alloc_skb(0);
+			if (unlikely(nskb == NULL)) {
+				kfree_skb(skb);
+				xenvif_tx_err(queue, &txreq, idx);
+				if (net_ratelimit())
+					netdev_err(queue->vif->dev,
+						   "Can't allocate the frag_list skb.\n");
+				break;
+			}
+		}
+
 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
 			struct xen_netif_extra_info *gso;
 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
 				/* Failure in xenvif_set_skb_gso is fatal. */
 				kfree_skb(skb);
+				kfree_skb(nskb);
 				break;
 			}
 		}
@@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
 		(*copy_ops)++;
 
-		skb_shinfo(skb)->nr_frags = ret;
 		if (data_len < txreq.size) {
-			skb_shinfo(skb)->nr_frags++;
 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
 					     pending_idx);
 			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
 		queue->pending_cons++;
 
-		request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
-		if (request_gop == NULL) {
-			kfree_skb(skb);
-			xenvif_tx_err(queue, &txreq, idx);
-			break;
-		}
-		gop = request_gop;
+		gop = xenvif_get_requests(queue, skb, txfrags, gop,
+					  frag_overflow, nskb);
 
 		__skb_queue_tail(&queue->tx_queue, skb);
 
@@ -1536,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
 		smp_wmb();
 		queue->dealloc_prod++;
 	} while (ubuf);
-	wake_up(&queue->dealloc_wq);
 	spin_unlock_irqrestore(&queue->callback_lock, flags);
 
 	if (likely(zerocopy_success))
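
The interface.c hunk pairs with the netback.c rework: the dealloc thread is
now woken from xenvif_skb_zerocopy_complete(), strictly after the
inflight_packets decrement, so a kthread_stop()'d dealloc thread cannot miss
the final wake-up; and the frag_list skb is allocated up front in
xenvif_tx_build_gops(), leaving xenvif_get_requests() with no failure path.
A sketch of the decrement-then-wake ordering, under those assumptions:

	#include <linux/atomic.h>
	#include <linux/wait.h>

	static atomic_t inflight = ATOMIC_INIT(0);
	static DECLARE_WAIT_QUEUE_HEAD(dealloc_wq);

	static void packet_done(void)
	{
		atomic_dec(&inflight);
		wake_up(&dealloc_wq);	/* after the decrement, never before */
	}

	/* A waiter re-checks the counter on every wake-up, so as long as the
	 * decrement precedes the wake it cannot sleep forever on the last
	 * packet.
	 */
	static void wait_for_idle(void)
	{
		wait_event(dealloc_wq, atomic_read(&inflight) == 0);
	}
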
diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c
index 23435f2a5486..2e2530743831 100644
--- a/drivers/ntb/ntb.c
+++ b/drivers/ntb/ntb.c
@@ -114,7 +114,7 @@ int ntb_register_device(struct ntb_dev *ntb)
 	ntb->dev.bus = &ntb_bus;
 	ntb->dev.parent = &ntb->pdev->dev;
 	ntb->dev.release = ntb_dev_release;
-	dev_set_name(&ntb->dev, pci_name(ntb->pdev));
+	dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
 
 	ntb->ctx = NULL;
 	ntb->ctx_ops = NULL;
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index efe3ad4122f2..1c6386d5f79c 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -142,10 +142,11 @@ struct ntb_transport_qp {
 
 	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
 			   void *data, int len);
+	struct list_head rx_post_q;
 	struct list_head rx_pend_q;
 	struct list_head rx_free_q;
-	spinlock_t ntb_rx_pend_q_lock;
-	spinlock_t ntb_rx_free_q_lock;
+	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
+	spinlock_t ntb_rx_q_lock;
 	void *rx_buff;
 	unsigned int rx_index;
 	unsigned int rx_max_entry;
@@ -211,6 +212,8 @@ struct ntb_transport_ctx {
 	bool link_is_up;
 	struct delayed_work link_work;
 	struct work_struct link_cleanup;
+
+	struct dentry *debugfs_node_dir;
 };
 
 enum {
@@ -436,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
 	char *buf;
 	ssize_t ret, out_offset, out_count;
 
+	qp = filp->private_data;
+
+	if (!qp || !qp->link_is_up)
+		return 0;
+
 	out_count = 1000;
 
 	buf = kmalloc(out_count, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
-	qp = filp->private_data;
 	out_offset = 0;
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "NTB QP stats\n");
@@ -534,6 +541,27 @@ out:
 	return entry;
 }
 
+static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
+					   struct list_head *list,
+					   struct list_head *to_list)
+{
+	struct ntb_queue_entry *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+
+	if (list_empty(list)) {
+		entry = NULL;
+	} else {
+		entry = list_first_entry(list, struct ntb_queue_entry, entry);
+		list_move_tail(&entry->entry, to_list);
+	}
+
+	spin_unlock_irqrestore(lock, flags);
+
+	return entry;
+}
+
 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
 				     unsigned int qp_num)
 {
@@ -601,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
 }
 
 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
-		      unsigned int size)
+		      resource_size_t size)
 {
 	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
 	struct pci_dev *pdev = nt->ndev->pdev;
-	unsigned int xlat_size, buff_size;
+	size_t xlat_size, buff_size;
 	int rc;
 
+	if (!size)
+		return -EINVAL;
+
 	xlat_size = round_up(size, mw->xlat_align_size);
 	buff_size = round_up(size, mw->xlat_align);
 
@@ -627,7 +658,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
 	if (!mw->virt_addr) {
 		mw->xlat_size = 0;
 		mw->buff_size = 0;
-		dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
+		dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
 			buff_size);
 		return -ENOMEM;
 	}
@@ -867,6 +898,8 @@ static void ntb_qp_link_work(struct work_struct *work)
 
 		if (qp->event_handler)
 			qp->event_handler(qp->cb_data, qp->link_is_up);
+
+		tasklet_schedule(&qp->rxc_db_work);
 	} else if (nt->link_is_up)
 		schedule_delayed_work(&qp->link_work,
 				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -923,12 +956,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
 	qp->tx_max_entry = tx_size / qp->tx_max_frame;
 
-	if (nt_debugfs_dir) {
+	if (nt->debugfs_node_dir) {
 		char debugfs_name[4];
 
 		snprintf(debugfs_name, 4, "qp%d", qp_num);
 		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
-						     nt_debugfs_dir);
+						     nt->debugfs_node_dir);
 
 		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
 							qp->debugfs_dir, qp,
@@ -941,10 +974,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
 	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
 
-	spin_lock_init(&qp->ntb_rx_pend_q_lock);
-	spin_lock_init(&qp->ntb_rx_free_q_lock);
+	spin_lock_init(&qp->ntb_rx_q_lock);
 	spin_lock_init(&qp->ntb_tx_free_q_lock);
 
+	INIT_LIST_HEAD(&qp->rx_post_q);
 	INIT_LIST_HEAD(&qp->rx_pend_q);
 	INIT_LIST_HEAD(&qp->rx_free_q);
 	INIT_LIST_HEAD(&qp->tx_free_q);
@@ -1031,6 +1064,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 		goto err2;
 	}
 
+	if (nt_debugfs_dir) {
+		nt->debugfs_node_dir =
+			debugfs_create_dir(pci_name(ndev->pdev),
+					   nt_debugfs_dir);
+	}
+
 	for (i = 0; i < qp_count; i++) {
 		rc = ntb_transport_init_queue(nt, i);
 		if (rc)
@@ -1107,22 +1146,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
 	kfree(nt);
 }
 
-static void ntb_rx_copy_callback(void *data)
+static void ntb_complete_rxc(struct ntb_transport_qp *qp)
 {
-	struct ntb_queue_entry *entry = data;
-	struct ntb_transport_qp *qp = entry->qp;
-	void *cb_data = entry->cb_data;
-	unsigned int len = entry->len;
-	struct ntb_payload_header *hdr = entry->rx_hdr;
+	struct ntb_queue_entry *entry;
+	void *cb_data;
+	unsigned int len;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+
+	while (!list_empty(&qp->rx_post_q)) {
+		entry = list_first_entry(&qp->rx_post_q,
+					 struct ntb_queue_entry, entry);
+		if (!(entry->flags & DESC_DONE_FLAG))
+			break;
+
+		entry->rx_hdr->flags = 0;
+		iowrite32(entry->index, &qp->rx_info->entry);
 
-	hdr->flags = 0;
+		cb_data = entry->cb_data;
+		len = entry->len;
 
-	iowrite32(entry->index, &qp->rx_info->entry);
+		list_move_tail(&entry->entry, &qp->rx_free_q);
 
-	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+		spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
 
-	if (qp->rx_handler && qp->client_ready)
-		qp->rx_handler(qp, qp->cb_data, cb_data, len);
+		if (qp->rx_handler && qp->client_ready)
+			qp->rx_handler(qp, qp->cb_data, cb_data, len);
+
+		spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+	}
+
+	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+}
+
+static void ntb_rx_copy_callback(void *data)
+{
+	struct ntb_queue_entry *entry = data;
+
+	entry->flags |= DESC_DONE_FLAG;
+
+	ntb_complete_rxc(entry->qp);
 }
 
 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
@@ -1138,19 +1202,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
 	ntb_rx_copy_callback(entry);
 }
 
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
-			 size_t len)
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 {
 	struct dma_async_tx_descriptor *txd;
 	struct ntb_transport_qp *qp = entry->qp;
 	struct dma_chan *chan = qp->dma_chan;
 	struct dma_device *device;
-	size_t pay_off, buff_off;
+	size_t pay_off, buff_off, len;
 	struct dmaengine_unmap_data *unmap;
 	dma_cookie_t cookie;
 	void *buf = entry->buf;
 
-	entry->len = len;
+	len = entry->len;
 
 	if (!chan)
 		goto err;
@@ -1226,7 +1289,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 	struct ntb_payload_header *hdr;
 	struct ntb_queue_entry *entry;
 	void *offset;
-	int rc;
 
 	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
 	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
@@ -1255,65 +1317,43 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 		return -EIO;
 	}
 
-	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
 	if (!entry) {
 		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
 		qp->rx_err_no_buf++;
-
-		rc = -ENOMEM;
-		goto err;
+		return -EAGAIN;
 	}
 
+	entry->rx_hdr = hdr;
+	entry->index = qp->rx_index;
+
 	if (hdr->len > entry->len) {
 		dev_dbg(&qp->ndev->pdev->dev,
 			"receive buffer overflow! Wanted %d got %d\n",
 			hdr->len, entry->len);
 		qp->rx_err_oflow++;
 
-		rc = -EIO;
-		goto err;
-	}
+		entry->len = -EIO;
+		entry->flags |= DESC_DONE_FLAG;
 
-	dev_dbg(&qp->ndev->pdev->dev,
-		"RX OK index %u ver %u size %d into buf size %d\n",
-		qp->rx_index, hdr->ver, hdr->len, entry->len);
+		ntb_complete_rxc(qp);
+	} else {
+		dev_dbg(&qp->ndev->pdev->dev,
+			"RX OK index %u ver %u size %d into buf size %d\n",
+			qp->rx_index, hdr->ver, hdr->len, entry->len);
 
-	qp->rx_bytes += hdr->len;
-	qp->rx_pkts++;
+		qp->rx_bytes += hdr->len;
+		qp->rx_pkts++;
 
-	entry->index = qp->rx_index;
-	entry->rx_hdr = hdr;
+		entry->len = hdr->len;
 
-	ntb_async_rx(entry, offset, hdr->len);
+		ntb_async_rx(entry, offset);
+	}
 
 	qp->rx_index++;
 	qp->rx_index %= qp->rx_max_entry;
 
 	return 0;
-
-err:
-	/* FIXME: if this syncrhonous update of the rx_index gets ahead of
-	 * asyncrhonous ntb_rx_copy_callback of previous entry, there are three
-	 * scenarios:
-	 *
-	 * 1) The peer might miss this update, but observe the update
-	 * from the memcpy completion callback.  In this case, the buffer will
-	 * not be freed on the peer to be reused for a different packet.  The
-	 * successful rx of a later packet would clear the condition, but the
-	 * condition could persist if several rx fail in a row.
-	 *
-	 * 2) The peer may observe this update before the asyncrhonous copy of
-	 * prior packets is completed.  The peer may overwrite the buffers of
-	 * the prior packets before they are copied.
-	 *
-	 * 3) Both: the peer may observe the update, and then observe the index
-	 * decrement by the asynchronous completion callback.  Who knows what
-	 * badness that will cause.
-	 */
-	hdr->flags = 0;
-	iowrite32(qp->rx_index, &qp->rx_info->entry);
-
-	return rc;
 }
 
 static void ntb_transport_rxc_db(unsigned long data)
@@ -1333,7 +1373,7 @@ static void ntb_transport_rxc_db(unsigned long data)
 		break;
 	}
 
-	if (qp->dma_chan)
+	if (i && qp->dma_chan)
 		dma_async_issue_pending(qp->dma_chan);
 
 	if (i == qp->rx_max_entry) {
@@ -1609,7 +1649,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 			goto err1;
 
 		entry->qp = qp;
-		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
 			     &qp->rx_free_q);
 	}
 
@@ -1634,7 +1674,7 @@ err2:
 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
 		kfree(entry);
 err1:
-	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
 		kfree(entry);
 	if (qp->dma_chan)
 		dma_release_channel(qp->dma_chan);
@@ -1652,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
  */
 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 {
-	struct ntb_transport_ctx *nt = qp->transport;
 	struct pci_dev *pdev;
 	struct ntb_queue_entry *entry;
 	u64 qp_bit;
@@ -1689,18 +1728,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 	qp->tx_handler = NULL;
 	qp->event_handler = NULL;
 
-	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
 		kfree(entry);
 
-	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
-		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
+		dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
+		kfree(entry);
+	}
+
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
+		dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
 		kfree(entry);
 	}
 
 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
 		kfree(entry);
 
-	nt->qp_bitmap_free |= qp_bit;
+	qp->transport->qp_bitmap_free |= qp_bit;
 
 	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
 }
@@ -1724,14 +1768,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
 	if (!qp || qp->client_ready)
 		return NULL;
 
-	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
 	if (!entry)
 		return NULL;
 
 	buf = entry->cb_data;
 	*len = entry->len;
 
-	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
 
 	return buf;
 }
@@ -1757,15 +1801,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
 	if (!qp)
 		return -EINVAL;
 
-	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
 	if (!entry)
 		return -ENOMEM;
 
 	entry->cb_data = cb;
 	entry->buf = data;
 	entry->len = len;
+	entry->flags = 0;
+
+	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
 
-	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+	tasklet_schedule(&qp->rxc_db_work);
 
 	return 0;
 }
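
The ntb_transport rework above hinges on one lock covering all three receive
lists: with ntb_rx_q_lock protecting rx_free_q, rx_pend_q and the new
rx_post_q, ntb_list_mv() can pop an entry from one list and park it on another
in a single critical section, so ntb_complete_rxc() always sees each entry on
exactly one queue and can retire completions strictly in posted order. A
compact sketch of that helper, with demo_* as hypothetical names:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_entry {
		struct list_head entry;
	};

	/* Atomically move the head of @list onto @to_list; one lock across
	 * both lists means no observer can catch the entry on neither queue.
	 */
	static struct demo_entry *demo_list_mv(spinlock_t *lock,
					       struct list_head *list,
					       struct list_head *to_list)
	{
		struct demo_entry *e = NULL;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		if (!list_empty(list)) {
			e = list_first_entry(list, struct demo_entry, entry);
			list_move_tail(&e->entry, to_list);
		}
		spin_unlock_irqrestore(lock, flags);

		return e;
	}
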
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 73de4efcbe6e..944f50015ed0 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -2,7 +2,7 @@
 # PCI configuration
 #
 config PCI_BUS_ADDR_T_64BIT
-	def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
+	def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
 	depends on PCI
 
 config PCI_MSI
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index cefd636681b6..b978bbfe044c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -997,7 +997,12 @@ void set_pcie_port_type(struct pci_dev *pdev)
 	else if (type == PCI_EXP_TYPE_UPSTREAM ||
 		 type == PCI_EXP_TYPE_DOWNSTREAM) {
 		parent = pci_upstream_bridge(pdev);
-		if (!parent->has_secondary_link)
+
+		/*
+		 * Usually there's an upstream device (Root Port or Switch
+		 * Downstream Port), but we can't assume one exists.
+		 */
+		if (parent && !parent->has_secondary_link)
 			pdev->has_secondary_link = 1;
 	}
 }
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index cb1329919527..3271cd1abe7c 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -4,7 +4,6 @@
 
 menuconfig CHROME_PLATFORMS
 	bool "Platform support for Chrome hardware"
-	depends on X86 || ARM
 	---help---
 	  Say Y here to get to see options for platform support for
 	  various Chromebooks and Chromeboxes. This option alone does
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 26270c351624..ce129e595b55 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
 
 #define DRV_NAME		"fnic"
 #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
-#define DRV_VERSION		"1.6.0.17"
+#define DRV_VERSION		"1.6.0.17a"
 #define PFX			DRV_NAME ": "
 #define DFX			DRV_NAME "%d: "
 
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 155b286f1a9d..25436cd2860c 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -425,6 +425,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
 	unsigned long ptr;
 	struct fc_rport_priv *rdata;
 	spinlock_t *io_lock = NULL;
+	int io_lock_acquired = 0;
 
 	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
 		return SCSI_MLQUEUE_HOST_BUSY;
@@ -518,6 +519,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
 	spin_lock_irqsave(io_lock, flags);
 
 	/* initialize rest of io_req */
+	io_lock_acquired = 1;
 	io_req->port_id = rport->port_id;
 	io_req->start_time = jiffies;
 	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
@@ -571,7 +573,7 @@ out:
 		  (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
 
 	/* if only we issued IO, will we have the io lock */
-	if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED)
+	if (io_lock_acquired)
 		spin_unlock_irqrestore(io_lock, flags);
 
 	atomic_dec(&fnic->in_flight);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 1b3a09473452..30f9ef0c0d4f 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
 	if (resp) {
 		resp(sp, fp, arg);
 		res = true;
-	} else if (!IS_ERR(fp)) {
-		fc_frame_free(fp);
 	}
 
 	spin_lock_bh(&ep->ex_lock);
@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 	 * If new exch resp handler is valid then call that
 	 * first.
 	 */
-	fc_invoke_resp(ep, sp, fp);
+	if (!fc_invoke_resp(ep, sp, fp))
+		fc_frame_free(fp);
 
 	fc_exch_release(ep);
 	return;
@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
 	fc_exch_hold(ep);
 	if (!rc)
 		fc_exch_delete(ep);
-	fc_invoke_resp(ep, sp, fp);
+	if (!fc_invoke_resp(ep, sp, fp))
+		fc_frame_free(fp);
 	if (has_rec)
 		fc_exch_timer_set(ep, ep->r_a_tov);
 	fc_exch_release(ep);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c6795941b45d..2d5909c4685c 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1039,11 +1039,26 @@ restart:
 		fc_fcp_pkt_hold(fsp);
 		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 
-		if (!fc_fcp_lock_pkt(fsp)) {
+		spin_lock_bh(&fsp->scsi_pkt_lock);
+		if (!(fsp->state & FC_SRB_COMPL)) {
+			fsp->state |= FC_SRB_COMPL;
+			/*
+			 * TODO: dropping scsi_pkt_lock and then reacquiring
+			 * again around fc_fcp_cleanup_cmd() is required,
+			 * since fc_fcp_cleanup_cmd() calls into
+			 * fc_seq_set_resp() and that func preempts cpu using
+			 * schedule. May be schedule and related code should be
+			 * removed instead of unlocking here to avoid scheduling
+			 * while atomic bug.
+			 */
+			spin_unlock_bh(&fsp->scsi_pkt_lock);
+
 			fc_fcp_cleanup_cmd(fsp, error);
+
+			spin_lock_bh(&fsp->scsi_pkt_lock);
 			fc_io_compl(fsp);
-			fc_fcp_unlock_pkt(fsp);
 		}
+		spin_unlock_bh(&fsp->scsi_pkt_lock);
 
 		fc_fcp_pkt_release(fsp);
 		spin_lock_irqsave(&si->scsi_queue_lock, flags);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 8053f24f0349..98d9bb6ff725 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_session *session = conn->session;
-	unsigned long flags;
 
 	del_timer_sync(&conn->transport_timer);
 
+	mutex_lock(&session->eh_mutex);
 	spin_lock_bh(&session->frwd_lock);
 	conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
 	if (session->leadconn == conn) {
@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 	}
 	spin_unlock_bh(&session->frwd_lock);
 
-	/*
-	 * Block until all in-progress commands for this connection
-	 * time out or fail.
-	 */
-	for (;;) {
-		spin_lock_irqsave(session->host->host_lock, flags);
-		if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
-			spin_unlock_irqrestore(session->host->host_lock, flags);
-			break;
-		}
-		spin_unlock_irqrestore(session->host->host_lock, flags);
-		msleep_interruptible(500);
-		iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
-				  "host_busy %d host_failed %d\n",
-				  atomic_read(&session->host->host_busy),
-				  session->host->host_failed);
-		/*
-		 * force eh_abort() to unblock
-		 */
-		wake_up(&conn->ehwait);
-	}
-
 	/* flush queued up work because we free the connection below */
 	iscsi_suspend_tx(conn);
 
@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2994 if (session->leadconn == conn) 2972 if (session->leadconn == conn)
2995 session->leadconn = NULL; 2973 session->leadconn = NULL;
2996 spin_unlock_bh(&session->frwd_lock); 2974 spin_unlock_bh(&session->frwd_lock);
2975 mutex_unlock(&session->eh_mutex);
2997 2976
2998 iscsi_destroy_conn(cls_conn); 2977 iscsi_destroy_conn(cls_conn);
2999} 2978}
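
The libiscsi hunk above deletes the msleep()-based poll on host_busy and
instead holds session->eh_mutex across the whole teardown, so connection
destruction serializes against the error handler rather than spinning
until commands drain. A sketch of that substitution, with hypothetical
names:

#include <linux/mutex.h>

static void teardown_conn(struct mutex *eh_mutex, void (*do_teardown)(void))
{
	/* Blocks until eh_abort()/recovery paths release the mutex. */
	mutex_lock(eh_mutex);
	do_teardown();
	mutex_unlock(eh_mutex);
}
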
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index cfadccef045c..6457a8a0db9c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -26,7 +26,6 @@
26#include <linux/blkdev.h> 26#include <linux/blkdev.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/jiffies.h> 28#include <linux/jiffies.h>
29#include <asm/unaligned.h>
30 29
31#include <scsi/scsi.h> 30#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h> 31#include <scsi/scsi_cmnd.h>
@@ -2523,33 +2522,3 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
2523 } 2522 }
2524} 2523}
2525EXPORT_SYMBOL(scsi_build_sense_buffer); 2524EXPORT_SYMBOL(scsi_build_sense_buffer);
2526
2527/**
2528 * scsi_set_sense_information - set the information field in a
2529 * formatted sense data buffer
2530 * @buf: Where to build sense data
2531 * @info: 64-bit information value to be set
2532 *
2533 **/
2534void scsi_set_sense_information(u8 *buf, u64 info)
2535{
2536 if ((buf[0] & 0x7f) == 0x72) {
2537 u8 *ucp, len;
2538
2539 len = buf[7];
2540 ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
2541 if (!ucp) {
2542 buf[7] = len + 0xa;
2543 ucp = buf + 8 + len;
2544 }
2545 ucp[0] = 0;
2546 ucp[1] = 0xa;
2547 ucp[2] = 0x80; /* Valid bit */
2548 ucp[3] = 0;
2549 put_unaligned_be64(info, &ucp[4]);
2550 } else if ((buf[0] & 0x7f) == 0x70) {
2551 buf[0] |= 0x80;
2552 put_unaligned_be64(info, &buf[3]);
2553 }
2554}
2555EXPORT_SYMBOL(scsi_set_sense_information);
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 9e43ae1d2163..e4b799837948 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -217,15 +217,15 @@ static int sdev_runtime_suspend(struct device *dev)
217{ 217{
218 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 218 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
219 struct scsi_device *sdev = to_scsi_device(dev); 219 struct scsi_device *sdev = to_scsi_device(dev);
220 int err; 220 int err = 0;
221 221
222 err = blk_pre_runtime_suspend(sdev->request_queue); 222 if (pm && pm->runtime_suspend) {
223 if (err) 223 err = blk_pre_runtime_suspend(sdev->request_queue);
224 return err; 224 if (err)
225 if (pm && pm->runtime_suspend) 225 return err;
226 err = pm->runtime_suspend(dev); 226 err = pm->runtime_suspend(dev);
227 blk_post_runtime_suspend(sdev->request_queue, err); 227 blk_post_runtime_suspend(sdev->request_queue, err);
228 228 }
229 return err; 229 return err;
230} 230}
231 231
@@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
248 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 248 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
249 int err = 0; 249 int err = 0;
250 250
251 blk_pre_runtime_resume(sdev->request_queue); 251 if (pm && pm->runtime_resume) {
252 if (pm && pm->runtime_resume) 252 blk_pre_runtime_resume(sdev->request_queue);
253 err = pm->runtime_resume(dev); 253 err = pm->runtime_resume(dev);
254 blk_post_runtime_resume(sdev->request_queue, err); 254 blk_post_runtime_resume(sdev->request_queue, err);
255 255 }
256 return err; 256 return err;
257} 257}
258 258
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3b2fcb4fada0..a20da8c25b4f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
2770 max_xfer = sdkp->max_xfer_blocks; 2770 max_xfer = sdkp->max_xfer_blocks;
2771 max_xfer <<= ilog2(sdp->sector_size) - 9; 2771 max_xfer <<= ilog2(sdp->sector_size) - 9;
2772 2772
2773 max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), 2773 sdkp->disk->queue->limits.max_sectors =
2774 max_xfer); 2774 min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
2775 blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer); 2775
2776 set_capacity(disk, sdkp->capacity); 2776 set_capacity(disk, sdkp->capacity);
2777 sd_config_write_same(sdkp); 2777 sd_config_write_same(sdkp);
2778 kfree(buffer); 2778 kfree(buffer);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index cd77a064c772..fd092909a457 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
968 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA; 968 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
969 969
970 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 970 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
971 if (hdr->flags & ISCSI_FLAG_CMD_READ) { 971 if (hdr->flags & ISCSI_FLAG_CMD_READ)
972 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); 972 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
973 } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE) 973 else
974 cmd->targ_xfer_tag = 0xFFFFFFFF; 974 cmd->targ_xfer_tag = 0xFFFFFFFF;
975 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); 975 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
976 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 976 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index c2e9fea90b4a..860e84046177 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -457,8 +457,15 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
457 if (!strcmp(t->tf_ops->name, fo->name)) { 457 if (!strcmp(t->tf_ops->name, fo->name)) {
458 BUG_ON(atomic_read(&t->tf_access_cnt)); 458 BUG_ON(atomic_read(&t->tf_access_cnt));
459 list_del(&t->tf_list); 459 list_del(&t->tf_list);
460 mutex_unlock(&g_tf_lock);
461 /*
462 * Wait for any outstanding fabric se_deve_entry->rcu_head
463 * callbacks to complete post kfree_rcu(), before allowing
464 * fabric driver unload of TFO->module to proceed.
465 */
466 rcu_barrier();
460 kfree(t); 467 kfree(t);
461 break; 468 return;
462 } 469 }
463 } 470 }
464 mutex_unlock(&g_tf_lock); 471 mutex_unlock(&g_tf_lock);
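
This hunk and the target_core_hba.c hunk below apply the same fix: drop
the list mutex, then wait out all in-flight RCU callbacks with
rcu_barrier() before freeing, so a fabric module cannot be unloaded while
callbacks still reference it. A minimal sketch of the pattern, with
hypothetical names:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_entry {			/* hypothetical */
	struct list_head list;
};

static void unregister_entry(struct mutex *lock, struct my_entry *e)
{
	mutex_lock(lock);
	list_del(&e->list);
	mutex_unlock(lock);	/* rcu_barrier() can block; drop the lock */

	/* Wait for every pending call_rcu()/kfree_rcu() callback. */
	rcu_barrier();

	kfree(e);
}
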
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 62ea4e8e70a8..be9cefc07407 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -84,8 +84,16 @@ void target_backend_unregister(const struct target_backend_ops *ops)
84 list_for_each_entry(tb, &backend_list, list) { 84 list_for_each_entry(tb, &backend_list, list) {
85 if (tb->ops == ops) { 85 if (tb->ops == ops) {
86 list_del(&tb->list); 86 list_del(&tb->list);
87 mutex_unlock(&backend_mutex);
88 /*
89 * Wait for any outstanding backend driver ->rcu_head
90 * callbacks to complete post TBO->free_device() ->
91 * call_rcu(), before allowing backend driver module
92 * unload of target_backend_ops->owner to proceed.
93 */
94 rcu_barrier();
87 kfree(tb); 95 kfree(tb);
88 break; 96 return;
89 } 97 }
90 } 98 }
91 mutex_unlock(&backend_mutex); 99 mutex_unlock(&backend_mutex);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index b5ba1ec3c354..f87d4cef6d39 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1203,17 +1203,13 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1203 struct se_dev_entry *deve; 1203 struct se_dev_entry *deve;
1204 struct se_session *sess = cmd->se_sess; 1204 struct se_session *sess = cmd->se_sess;
1205 struct se_node_acl *nacl; 1205 struct se_node_acl *nacl;
1206 struct scsi_lun slun;
1206 unsigned char *buf; 1207 unsigned char *buf;
1207 u32 lun_count = 0, offset = 8; 1208 u32 lun_count = 0, offset = 8;
1208 1209 __be32 len;
1209 if (cmd->data_length < 16) {
1210 pr_warn("REPORT LUNS allocation length %u too small\n",
1211 cmd->data_length);
1212 return TCM_INVALID_CDB_FIELD;
1213 }
1214 1210
1215 buf = transport_kmap_data_sg(cmd); 1211 buf = transport_kmap_data_sg(cmd);
1216 if (!buf) 1212 if (cmd->data_length && !buf)
1217 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1213 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1218 1214
1219 /* 1215 /*
@@ -1221,11 +1217,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1221 * coming via a target_core_mod PASSTHROUGH op, and not through 1217 * coming via a target_core_mod PASSTHROUGH op, and not through
1222 * a $FABRIC_MOD. In that case, report LUN=0 only. 1218 * a $FABRIC_MOD. In that case, report LUN=0 only.
1223 */ 1219 */
1224 if (!sess) { 1220 if (!sess)
1225 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
1226 lun_count = 1;
1227 goto done; 1221 goto done;
1228 } 1222
1229 nacl = sess->se_node_acl; 1223 nacl = sess->se_node_acl;
1230 1224
1231 rcu_read_lock(); 1225 rcu_read_lock();
@@ -1236,10 +1230,12 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1236 * See SPC2-R20 7.19. 1230 * See SPC2-R20 7.19.
1237 */ 1231 */
1238 lun_count++; 1232 lun_count++;
1239 if ((offset + 8) > cmd->data_length) 1233 if (offset >= cmd->data_length)
1240 continue; 1234 continue;
1241 1235
1242 int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]); 1236 int_to_scsilun(deve->mapped_lun, &slun);
1237 memcpy(buf + offset, &slun,
1238 min(8u, cmd->data_length - offset));
1243 offset += 8; 1239 offset += 8;
1244 } 1240 }
1245 rcu_read_unlock(); 1241 rcu_read_unlock();
@@ -1248,12 +1244,22 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1248 * See SPC3 r07, page 159. 1244 * See SPC3 r07, page 159.
1249 */ 1245 */
1250done: 1246done:
1251 lun_count *= 8; 1247 /*
1252 buf[0] = ((lun_count >> 24) & 0xff); 1248 * If no LUNs are accessible, report virtual LUN 0.
1253 buf[1] = ((lun_count >> 16) & 0xff); 1249 */
1254 buf[2] = ((lun_count >> 8) & 0xff); 1250 if (lun_count == 0) {
1255 buf[3] = (lun_count & 0xff); 1251 int_to_scsilun(0, &slun);
1256 transport_kunmap_data_sg(cmd); 1252 if (cmd->data_length > 8)
1253 memcpy(buf + offset, &slun,
1254 min(8u, cmd->data_length - offset));
1255 lun_count = 1;
1256 }
1257
1258 if (buf) {
1259 len = cpu_to_be32(lun_count * 8);
1260 memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
1261 transport_kunmap_data_sg(cmd);
1262 }
1257 1263
1258 target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8); 1264 target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
1259 return 0; 1265 return 0;
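
The hunk above removes the hard requirement that the REPORT LUNS
allocation length be at least 16 and instead truncates each 8-byte entry
to whatever space the initiator provided, matching SPC's rule that a
device returns as much of the parameter data as fits. A sketch of the
bounded copy, with a hypothetical helper name:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

static void put_lun_entry(unsigned char *buf, u32 buf_len, u32 offset,
			  const void *entry)
{
	/* Copy at most 8 bytes, clipped to the allocation length. */
	if (offset < buf_len)
		memcpy(buf + offset, entry, min(8u, buf_len - offset));
}
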
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6509c61b9648..620dcd405ff6 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -68,7 +68,7 @@ struct power_table {
68 * registered cooling device. 68 * registered cooling device.
69 * @cpufreq_state: integer value representing the current state of cpufreq 69 * @cpufreq_state: integer value representing the current state of cpufreq
70 * cooling devices. 70 * cooling devices.
71 * @cpufreq_val: integer value representing the absolute value of the clipped 71 * @clipped_freq: integer value representing the absolute value of the clipped
72 * frequency. 72 * frequency.
73 * @max_level: maximum cooling level. One less than total number of valid 73 * @max_level: maximum cooling level. One less than total number of valid
74 * cpufreq frequencies. 74 * cpufreq frequencies.
@@ -91,7 +91,7 @@ struct cpufreq_cooling_device {
91 int id; 91 int id;
92 struct thermal_cooling_device *cool_dev; 92 struct thermal_cooling_device *cool_dev;
93 unsigned int cpufreq_state; 93 unsigned int cpufreq_state;
94 unsigned int cpufreq_val; 94 unsigned int clipped_freq;
95 unsigned int max_level; 95 unsigned int max_level;
96 unsigned int *freq_table; /* In descending order */ 96 unsigned int *freq_table; /* In descending order */
97 struct cpumask allowed_cpus; 97 struct cpumask allowed_cpus;
@@ -107,6 +107,9 @@ struct cpufreq_cooling_device {
107static DEFINE_IDR(cpufreq_idr); 107static DEFINE_IDR(cpufreq_idr);
108static DEFINE_MUTEX(cooling_cpufreq_lock); 108static DEFINE_MUTEX(cooling_cpufreq_lock);
109 109
110static unsigned int cpufreq_dev_count;
111
112static DEFINE_MUTEX(cooling_list_lock);
110static LIST_HEAD(cpufreq_dev_list); 113static LIST_HEAD(cpufreq_dev_list);
111 114
112/** 115/**
@@ -185,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
185{ 188{
186 struct cpufreq_cooling_device *cpufreq_dev; 189 struct cpufreq_cooling_device *cpufreq_dev;
187 190
188 mutex_lock(&cooling_cpufreq_lock); 191 mutex_lock(&cooling_list_lock);
189 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { 192 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
190 if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) { 193 if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
191 mutex_unlock(&cooling_cpufreq_lock); 194 mutex_unlock(&cooling_list_lock);
192 return get_level(cpufreq_dev, freq); 195 return get_level(cpufreq_dev, freq);
193 } 196 }
194 } 197 }
195 mutex_unlock(&cooling_cpufreq_lock); 198 mutex_unlock(&cooling_list_lock);
196 199
197 pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu); 200 pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
198 return THERMAL_CSTATE_INVALID; 201 return THERMAL_CSTATE_INVALID;
@@ -215,29 +218,35 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
215 unsigned long event, void *data) 218 unsigned long event, void *data)
216{ 219{
217 struct cpufreq_policy *policy = data; 220 struct cpufreq_policy *policy = data;
218 unsigned long max_freq = 0; 221 unsigned long clipped_freq;
219 struct cpufreq_cooling_device *cpufreq_dev; 222 struct cpufreq_cooling_device *cpufreq_dev;
220 223
221 switch (event) { 224 if (event != CPUFREQ_ADJUST)
225 return NOTIFY_DONE;
222 226
223 case CPUFREQ_ADJUST: 227 mutex_lock(&cooling_list_lock);
224 mutex_lock(&cooling_cpufreq_lock); 228 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
225 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { 229 if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
226 if (!cpumask_test_cpu(policy->cpu, 230 continue;
227 &cpufreq_dev->allowed_cpus))
228 continue;
229 231
230 max_freq = cpufreq_dev->cpufreq_val; 232 /*
233 * policy->max is the maximum allowed frequency defined by user
234 * and clipped_freq is the maximum that thermal constraints
235 * allow.
236 *
237 * If clipped_freq is lower than policy->max, then we need to
238 * readjust policy->max.
239 *
240 * But, if clipped_freq is greater than policy->max, we don't
241 * need to do anything.
242 */
243 clipped_freq = cpufreq_dev->clipped_freq;
231 244
232 if (policy->max != max_freq) 245 if (policy->max > clipped_freq)
233 cpufreq_verify_within_limits(policy, 0, 246 cpufreq_verify_within_limits(policy, 0, clipped_freq);
234 max_freq);
235 }
236 mutex_unlock(&cooling_cpufreq_lock);
237 break; 247 break;
238 default:
239 return NOTIFY_DONE;
240 } 248 }
249 mutex_unlock(&cooling_list_lock);
241 250
242 return NOTIFY_OK; 251 return NOTIFY_OK;
243} 252}
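
The rewritten notifier above only intervenes when the thermal ceiling is
stricter than the user's: policy->max is the user-defined limit and
clipped_freq the thermal one. A sketch of just that decision
(illustrative only, not a drop-in):

#include <linux/cpufreq.h>

static void clamp_to_thermal(struct cpufreq_policy *policy,
			     unsigned int clipped_freq)
{
	/* Tighten policy->max only when thermal constraints demand it. */
	if (policy->max > clipped_freq)
		cpufreq_verify_within_limits(policy, 0, clipped_freq);
}
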
@@ -519,7 +528,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
519 528
520 clip_freq = cpufreq_device->freq_table[state]; 529 clip_freq = cpufreq_device->freq_table[state];
521 cpufreq_device->cpufreq_state = state; 530 cpufreq_device->cpufreq_state = state;
522 cpufreq_device->cpufreq_val = clip_freq; 531 cpufreq_device->clipped_freq = clip_freq;
523 532
524 cpufreq_update_policy(cpu); 533 cpufreq_update_policy(cpu);
525 534
@@ -861,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np,
861 pr_debug("%s: freq:%u KHz\n", __func__, freq); 870 pr_debug("%s: freq:%u KHz\n", __func__, freq);
862 } 871 }
863 872
864 cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0]; 873 cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
865 cpufreq_dev->cool_dev = cool_dev; 874 cpufreq_dev->cool_dev = cool_dev;
866 875
867 mutex_lock(&cooling_cpufreq_lock); 876 mutex_lock(&cooling_cpufreq_lock);
868 877
878 mutex_lock(&cooling_list_lock);
879 list_add(&cpufreq_dev->node, &cpufreq_dev_list);
880 mutex_unlock(&cooling_list_lock);
881
869 /* Register the notifier for first cpufreq cooling device */ 882 /* Register the notifier for first cpufreq cooling device */
870 if (list_empty(&cpufreq_dev_list)) 883 if (!cpufreq_dev_count++)
871 cpufreq_register_notifier(&thermal_cpufreq_notifier_block, 884 cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
872 CPUFREQ_POLICY_NOTIFIER); 885 CPUFREQ_POLICY_NOTIFIER);
873 list_add(&cpufreq_dev->node, &cpufreq_dev_list);
874
875 mutex_unlock(&cooling_cpufreq_lock); 886 mutex_unlock(&cooling_cpufreq_lock);
876 887
877 return cool_dev; 888 return cool_dev;
@@ -1013,13 +1024,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
1013 return; 1024 return;
1014 1025
1015 cpufreq_dev = cdev->devdata; 1026 cpufreq_dev = cdev->devdata;
1016 mutex_lock(&cooling_cpufreq_lock);
1017 list_del(&cpufreq_dev->node);
1018 1027
1019 /* Unregister the notifier for the last cpufreq cooling device */ 1028 /* Unregister the notifier for the last cpufreq cooling device */
1020 if (list_empty(&cpufreq_dev_list)) 1029 mutex_lock(&cooling_cpufreq_lock);
1030 if (!--cpufreq_dev_count)
1021 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, 1031 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
1022 CPUFREQ_POLICY_NOTIFIER); 1032 CPUFREQ_POLICY_NOTIFIER);
1033
1034 mutex_lock(&cooling_list_lock);
1035 list_del(&cpufreq_dev->node);
1036 mutex_unlock(&cooling_list_lock);
1037
1023 mutex_unlock(&cooling_cpufreq_lock); 1038 mutex_unlock(&cooling_cpufreq_lock);
1024 1039
1025 thermal_cooling_device_unregister(cpufreq_dev->cool_dev); 1040 thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
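
The registration changes above replace the list_empty() checks with a
dedicated cpufreq_dev_count, so the cpufreq policy notifier is registered
exactly once, on the first cooling device, and unregistered on the last,
independent of who holds cooling_list_lock. The underlying pattern, with
hypothetical names:

#include <linux/mutex.h>

static DEFINE_MUTEX(reg_lock);
static unsigned int user_count;	/* hypothetical refcount */

static void get_shared_resource(void (*do_register)(void))
{
	mutex_lock(&reg_lock);
	if (!user_count++)		/* 0 -> 1: first user registers */
		do_register();
	mutex_unlock(&reg_lock);
}

static void put_shared_resource(void (*do_unregister)(void))
{
	mutex_lock(&reg_lock);
	if (!--user_count)		/* 1 -> 0: last user unregisters */
		do_unregister();
	mutex_unlock(&reg_lock);
}
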
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 63a448f9d93b..7006860f2f36 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -334,7 +334,7 @@ static int allocate_power(struct thermal_zone_device *tz,
334 max_allocatable_power, current_temp, 334 max_allocatable_power, current_temp,
335 (s32)control_temp - (s32)current_temp); 335 (s32)control_temp - (s32)current_temp);
336 336
337 devm_kfree(&tz->device, req_power); 337 kfree(req_power);
338unlock: 338unlock:
339 mutex_unlock(&tz->lock); 339 mutex_unlock(&tz->lock);
340 340
@@ -426,7 +426,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
426 return -EINVAL; 426 return -EINVAL;
427 } 427 }
428 428
429 params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL); 429 params = kzalloc(sizeof(*params), GFP_KERNEL);
430 if (!params) 430 if (!params)
431 return -ENOMEM; 431 return -ENOMEM;
432 432
@@ -468,14 +468,14 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
468 return 0; 468 return 0;
469 469
470free: 470free:
471 devm_kfree(&tz->device, params); 471 kfree(params);
472 return ret; 472 return ret;
473} 473}
474 474
475static void power_allocator_unbind(struct thermal_zone_device *tz) 475static void power_allocator_unbind(struct thermal_zone_device *tz)
476{ 476{
477 dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id); 477 dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
478 devm_kfree(&tz->device, tz->governor_data); 478 kfree(tz->governor_data);
479 tz->governor_data = NULL; 479 tz->governor_data = NULL;
480} 480}
481 481
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 8bf495ffb020..e0606c01e8ac 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -22,9 +22,7 @@ source "drivers/gpu/vga/Kconfig"
22source "drivers/gpu/host1x/Kconfig" 22source "drivers/gpu/host1x/Kconfig"
23source "drivers/gpu/ipu-v3/Kconfig" 23source "drivers/gpu/ipu-v3/Kconfig"
24 24
25menu "Direct Rendering Manager"
26source "drivers/gpu/drm/Kconfig" 25source "drivers/gpu/drm/Kconfig"
27endmenu
28 26
29menu "Frame buffer Devices" 27menu "Frame buffer Devices"
30source "drivers/video/fbdev/Kconfig" 28source "drivers/video/fbdev/Kconfig"
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 658c34bb9076..1aaf89300621 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1306,10 +1306,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
1306 int y; 1306 int y;
1307 int c = scr_readw((u16 *) vc->vc_pos); 1307 int c = scr_readw((u16 *) vc->vc_pos);
1308 1308
1309 ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
1310
1309 if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1) 1311 if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
1310 return; 1312 return;
1311 1313
1312 ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
1313 if (vc->vc_cursor_type & 0x10) 1314 if (vc->vc_cursor_type & 0x10)
1314 fbcon_del_cursor_timer(info); 1315 fbcon_del_cursor_timer(info);
1315 else 1316 else
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 2d98de535e0f..f888561568d9 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -298,7 +298,7 @@ config FB_ARMCLCD
298 298
299# Helper logic selected only by the ARM Versatile platform family. 299# Helper logic selected only by the ARM Versatile platform family.
300config PLAT_VERSATILE_CLCD 300config PLAT_VERSATILE_CLCD
301 def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS 301 def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
302 depends on ARM 302 depends on ARM
303 depends on FB_ARMCLCD && FB=y 303 depends on FB_ARMCLCD && FB=y
304 304
diff --git a/drivers/video/fbdev/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c
index 928ee639c0c1..bf407b6ba15c 100644
--- a/drivers/video/fbdev/omap2/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/dss/dss-of.c
@@ -60,6 +60,8 @@ omapdss_of_get_next_port(const struct device_node *parent,
60 } 60 }
61 prev = port; 61 prev = port;
62 } while (of_node_cmp(port->name, "port") != 0); 62 } while (of_node_cmp(port->name, "port") != 0);
63
64 of_node_put(ports);
63 } 65 }
64 66
65 return port; 67 return port;
@@ -94,7 +96,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
94 if (!port) 96 if (!port)
95 return NULL; 97 return NULL;
96 98
97 np = of_get_next_parent(port); 99 np = of_get_parent(port);
98 100
99 for (i = 0; i < 2 && np; ++i) { 101 for (i = 0; i < 2 && np; ++i) {
100 struct property *prop; 102 struct property *prop;
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 86bd457d039d..50bce45e7f3d 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -653,7 +653,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
653 goto err_free_dma; 653 goto err_free_dma;
654 } 654 }
655 655
656 ret = clk_enable(priv->clk); 656 ret = clk_prepare_enable(priv->clk);
657 if (ret < 0) { 657 if (ret < 0) {
658 dev_err(dev, "failed to enable clock\n"); 658 dev_err(dev, "failed to enable clock\n");
659 goto err_misc_deregister; 659 goto err_misc_deregister;
@@ -685,7 +685,7 @@ err_misc_deregister:
685 misc_deregister(&priv->misc_dev); 685 misc_deregister(&priv->misc_dev);
686 686
687err_disable_clk: 687err_disable_clk:
688 clk_disable(priv->clk); 688 clk_disable_unprepare(priv->clk);
689 689
690 return ret; 690 return ret;
691} 691}
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
index 111c2d1911d3..b5102aa6090d 100644
--- a/drivers/video/of_videomode.c
+++ b/drivers/video/of_videomode.c
@@ -44,11 +44,9 @@ int of_get_videomode(struct device_node *np, struct videomode *vm,
44 index = disp->native_mode; 44 index = disp->native_mode;
45 45
46 ret = videomode_from_timings(disp, vm, index); 46 ret = videomode_from_timings(disp, vm, index);
47 if (ret)
48 return ret;
49 47
50 display_timings_release(disp); 48 display_timings_release(disp);
51 49
52 return 0; 50 return ret;
53} 51}
54EXPORT_SYMBOL_GPL(of_get_videomode); 52EXPORT_SYMBOL_GPL(of_get_videomode);
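
The of_videomode change above fixes a leak in the error path: instead of
returning before display_timings_release(), every path now falls through
to the single release and returns ret. The shape of that fix, with
hypothetical names:

#include <linux/errno.h>

struct res;
struct res *acquire(void);		/* hypothetical */
int use(struct res *r);			/* may fail */
void release(struct res *r);

static int get_mode(void)
{
	struct res *r = acquire();
	int ret;

	if (!r)
		return -ENOMEM;

	ret = use(r);
	release(r);	/* runs on success and failure alike */
	return ret;
}
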
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 1495eccb1617..96093ae369a5 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -452,12 +452,10 @@ static void xen_free_irq(unsigned irq)
452 irq_free_desc(irq); 452 irq_free_desc(irq);
453} 453}
454 454
455static void xen_evtchn_close(unsigned int port, unsigned int cpu) 455static void xen_evtchn_close(unsigned int port)
456{ 456{
457 struct evtchn_close close; 457 struct evtchn_close close;
458 458
459 xen_evtchn_op_close(port, cpu);
460
461 close.port = port; 459 close.port = port;
462 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 460 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
463 BUG(); 461 BUG();
@@ -546,7 +544,7 @@ out:
546 544
547err: 545err:
548 pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); 546 pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
549 xen_evtchn_close(evtchn, NR_CPUS); 547 xen_evtchn_close(evtchn);
550 return 0; 548 return 0;
551} 549}
552 550
@@ -567,7 +565,7 @@ static void shutdown_pirq(struct irq_data *data)
567 return; 565 return;
568 566
569 mask_evtchn(evtchn); 567 mask_evtchn(evtchn);
570 xen_evtchn_close(evtchn, cpu_from_evtchn(evtchn)); 568 xen_evtchn_close(evtchn);
571 xen_irq_info_cleanup(info); 569 xen_irq_info_cleanup(info);
572} 570}
573 571
@@ -611,7 +609,7 @@ static void __unbind_from_irq(unsigned int irq)
611 if (VALID_EVTCHN(evtchn)) { 609 if (VALID_EVTCHN(evtchn)) {
612 unsigned int cpu = cpu_from_irq(irq); 610 unsigned int cpu = cpu_from_irq(irq);
613 611
614 xen_evtchn_close(evtchn, cpu); 612 xen_evtchn_close(evtchn);
615 613
616 switch (type_from_irq(irq)) { 614 switch (type_from_irq(irq)) {
617 case IRQT_VIRQ: 615 case IRQT_VIRQ:
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 6df8aac966b9..ed673e1acd61 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -255,12 +255,6 @@ static void evtchn_fifo_unmask(unsigned port)
255 } 255 }
256} 256}
257 257
258static bool evtchn_fifo_is_linked(unsigned port)
259{
260 event_word_t *word = event_word_from_port(port);
261 return sync_test_bit(EVTCHN_FIFO_BIT(LINKED, word), BM(word));
262}
263
264static uint32_t clear_linked(volatile event_word_t *word) 258static uint32_t clear_linked(volatile event_word_t *word)
265{ 259{
266 event_word_t new, old, w; 260 event_word_t new, old, w;
@@ -287,8 +281,7 @@ static void handle_irq_for_port(unsigned port)
287 281
288static void consume_one_event(unsigned cpu, 282static void consume_one_event(unsigned cpu,
289 struct evtchn_fifo_control_block *control_block, 283 struct evtchn_fifo_control_block *control_block,
290 unsigned priority, unsigned long *ready, 284 unsigned priority, unsigned long *ready)
291 bool drop)
292{ 285{
293 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); 286 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
294 uint32_t head; 287 uint32_t head;
@@ -320,15 +313,13 @@ static void consume_one_event(unsigned cpu,
320 if (head == 0) 313 if (head == 0)
321 clear_bit(priority, ready); 314 clear_bit(priority, ready);
322 315
323 if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) { 316 if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
324 if (likely(!drop)) 317 handle_irq_for_port(port);
325 handle_irq_for_port(port);
326 }
327 318
328 q->head[priority] = head; 319 q->head[priority] = head;
329} 320}
330 321
331static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) 322static void evtchn_fifo_handle_events(unsigned cpu)
332{ 323{
333 struct evtchn_fifo_control_block *control_block; 324 struct evtchn_fifo_control_block *control_block;
334 unsigned long ready; 325 unsigned long ready;
@@ -340,16 +331,11 @@ static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
340 331
341 while (ready) { 332 while (ready) {
342 q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); 333 q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
343 consume_one_event(cpu, control_block, q, &ready, drop); 334 consume_one_event(cpu, control_block, q, &ready);
344 ready |= xchg(&control_block->ready, 0); 335 ready |= xchg(&control_block->ready, 0);
345 } 336 }
346} 337}
347 338
348static void evtchn_fifo_handle_events(unsigned cpu)
349{
350 __evtchn_fifo_handle_events(cpu, false);
351}
352
353static void evtchn_fifo_resume(void) 339static void evtchn_fifo_resume(void)
354{ 340{
355 unsigned cpu; 341 unsigned cpu;
@@ -385,26 +371,6 @@ static void evtchn_fifo_resume(void)
385 event_array_pages = 0; 371 event_array_pages = 0;
386} 372}
387 373
388static void evtchn_fifo_close(unsigned port, unsigned int cpu)
389{
390 if (cpu == NR_CPUS)
391 return;
392
393 get_online_cpus();
394 if (cpu_online(cpu)) {
395 if (WARN_ON(irqs_disabled()))
396 goto out;
397
398 while (evtchn_fifo_is_linked(port))
399 cpu_relax();
400 } else {
401 __evtchn_fifo_handle_events(cpu, true);
402 }
403
404out:
405 put_online_cpus();
406}
407
408static const struct evtchn_ops evtchn_ops_fifo = { 374static const struct evtchn_ops evtchn_ops_fifo = {
409 .max_channels = evtchn_fifo_max_channels, 375 .max_channels = evtchn_fifo_max_channels,
410 .nr_channels = evtchn_fifo_nr_channels, 376 .nr_channels = evtchn_fifo_nr_channels,
@@ -418,7 +384,6 @@ static const struct evtchn_ops evtchn_ops_fifo = {
418 .unmask = evtchn_fifo_unmask, 384 .unmask = evtchn_fifo_unmask,
419 .handle_events = evtchn_fifo_handle_events, 385 .handle_events = evtchn_fifo_handle_events,
420 .resume = evtchn_fifo_resume, 386 .resume = evtchn_fifo_resume,
421 .close = evtchn_fifo_close,
422}; 387};
423 388
424static int evtchn_fifo_alloc_control_block(unsigned cpu) 389static int evtchn_fifo_alloc_control_block(unsigned cpu)
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index d18e12315ec0..50c2050a1e32 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -68,7 +68,6 @@ struct evtchn_ops {
68 bool (*test_and_set_mask)(unsigned port); 68 bool (*test_and_set_mask)(unsigned port);
69 void (*mask)(unsigned port); 69 void (*mask)(unsigned port);
70 void (*unmask)(unsigned port); 70 void (*unmask)(unsigned port);
71 void (*close)(unsigned port, unsigned cpu);
72 71
73 void (*handle_events)(unsigned cpu); 72 void (*handle_events)(unsigned cpu);
74 void (*resume)(void); 73 void (*resume)(void);
@@ -146,12 +145,6 @@ static inline void xen_evtchn_resume(void)
146 evtchn_ops->resume(); 145 evtchn_ops->resume();
147} 146}
148 147
149static inline void xen_evtchn_op_close(unsigned port, unsigned cpu)
150{
151 if (evtchn_ops->close)
152 return evtchn_ops->close(port, cpu);
153}
154
155void xen_evtchn_2l_init(void); 148void xen_evtchn_2l_init(void);
156int xen_evtchn_fifo_init(void); 149int xen_evtchn_fifo_init(void);
157 150
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 9ad327238ba9..e30353575d5d 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -814,8 +814,10 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
814 814
815 rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles, 815 rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
816 addrs); 816 addrs);
817 if (!rv) 817 if (!rv) {
818 vunmap(vaddr); 818 vunmap(vaddr);
819 free_xenballooned_pages(node->nr_handles, node->hvm.pages);
820 }
819 else 821 else
820 WARN(1, "Leaking %p, size %u page(s)\n", vaddr, 822 WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
821 node->nr_handles); 823 node->nr_handles);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 80cc1b35d460..ebb5e37455a0 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2246,7 +2246,15 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
2246 2246
2247 err = -EINVAL; 2247 err = -EINVAL;
2248 if (old) { 2248 if (old) {
2249 struct fuse_dev *fud = fuse_get_dev(old); 2249 struct fuse_dev *fud = NULL;
2250
2251 /*
2252 * Check against file->f_op because CUSE
2253 * uses the same ioctl handler.
2254 */
2255 if (old->f_op == file->f_op &&
2256 old->f_cred->user_ns == file->f_cred->user_ns)
2257 fud = fuse_get_dev(old);
2250 2258
2251 if (fud) { 2259 if (fud) {
2252 mutex_lock(&fuse_mutex); 2260 mutex_lock(&fuse_mutex);
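
The fuse hunk above hardens the clone ioctl: because CUSE shares this
ioctl handler, the donor fd must be verified to come from the same
character device (same file_operations) and the same user namespace
before its private data is trusted. The check, factored into a
hypothetical helper:

#include <linux/fs.h>

static bool same_device_class(struct file *old, struct file *file)
{
	return old->f_op == file->f_op &&
	       old->f_cred->user_ns == file->f_cred->user_ns;
}
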
diff --git a/fs/namei.c b/fs/namei.c
index fbbcf0993312..1c2105ed20c5 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -879,7 +879,7 @@ static inline int may_follow_link(struct nameidata *nd)
879 return 0; 879 return 0;
880 880
881 /* Allowed if parent directory not sticky and world-writable. */ 881 /* Allowed if parent directory not sticky and world-writable. */
882 parent = nd->path.dentry->d_inode; 882 parent = nd->inode;
883 if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH)) 883 if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
884 return 0; 884 return 0;
885 885
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 5908848d86b3..8b5ce7c5d9bb 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -681,7 +681,7 @@ struct drm_minor {
681 681
682struct drm_pending_vblank_event { 682struct drm_pending_vblank_event {
683 struct drm_pending_event base; 683 struct drm_pending_event base;
684 int pipe; 684 unsigned int pipe;
685 struct drm_event_vblank event; 685 struct drm_event_vblank event;
686}; 686};
687 687
@@ -700,7 +700,7 @@ struct drm_vblank_crtc {
700 /* for wraparound handling */ 700 /* for wraparound handling */
701 u32 last_wait; /* Last vblank seqno waited per CRTC */ 701 u32 last_wait; /* Last vblank seqno waited per CRTC */
702 unsigned int inmodeset; /* Display driver is setting mode */ 702 unsigned int inmodeset; /* Display driver is setting mode */
703 int crtc; /* crtc index */ 703 unsigned int pipe; /* crtc index */
704 bool enabled; /* so we don't call enable more than 704 bool enabled; /* so we don't call enable more than
705 once per disable */ 705 once per disable */
706}; 706};
@@ -887,6 +887,7 @@ static inline bool drm_is_primary_client(const struct drm_file *file_priv)
887/*@{*/ 887/*@{*/
888 888
889 /* Driver support (drm_drv.h) */ 889 /* Driver support (drm_drv.h) */
890extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
890extern long drm_ioctl(struct file *filp, 891extern long drm_ioctl(struct file *filp,
891 unsigned int cmd, unsigned long arg); 892 unsigned int cmd, unsigned long arg);
892extern long drm_compat_ioctl(struct file *filp, 893extern long drm_compat_ioctl(struct file *filp,
@@ -920,34 +921,34 @@ void drm_clflush_virt_range(void *addr, unsigned long length);
920extern int drm_irq_install(struct drm_device *dev, int irq); 921extern int drm_irq_install(struct drm_device *dev, int irq);
921extern int drm_irq_uninstall(struct drm_device *dev); 922extern int drm_irq_uninstall(struct drm_device *dev);
922 923
923extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); 924extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
924extern int drm_wait_vblank(struct drm_device *dev, void *data, 925extern int drm_wait_vblank(struct drm_device *dev, void *data,
925 struct drm_file *filp); 926 struct drm_file *filp);
926extern u32 drm_vblank_count(struct drm_device *dev, int crtc); 927extern u32 drm_vblank_count(struct drm_device *dev, int pipe);
927extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc); 928extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
928extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, 929extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
929 struct timeval *vblanktime); 930 struct timeval *vblanktime);
930extern void drm_send_vblank_event(struct drm_device *dev, int crtc, 931extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
931 struct drm_pending_vblank_event *e); 932 struct drm_pending_vblank_event *e);
932extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc, 933extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
933 struct drm_pending_vblank_event *e); 934 struct drm_pending_vblank_event *e);
934extern bool drm_handle_vblank(struct drm_device *dev, int crtc); 935extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
935extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc); 936extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
936extern int drm_vblank_get(struct drm_device *dev, int crtc); 937extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
937extern void drm_vblank_put(struct drm_device *dev, int crtc); 938extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
938extern int drm_crtc_vblank_get(struct drm_crtc *crtc); 939extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
939extern void drm_crtc_vblank_put(struct drm_crtc *crtc); 940extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
940extern void drm_wait_one_vblank(struct drm_device *dev, int crtc); 941extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
941extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc); 942extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
942extern void drm_vblank_off(struct drm_device *dev, int crtc); 943extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
943extern void drm_vblank_on(struct drm_device *dev, int crtc); 944extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
944extern void drm_crtc_vblank_off(struct drm_crtc *crtc); 945extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
945extern void drm_crtc_vblank_reset(struct drm_crtc *crtc); 946extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
946extern void drm_crtc_vblank_on(struct drm_crtc *crtc); 947extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
947extern void drm_vblank_cleanup(struct drm_device *dev); 948extern void drm_vblank_cleanup(struct drm_device *dev);
948 949
949extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, 950extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
950 int crtc, int *max_error, 951 unsigned int pipe, int *max_error,
951 struct timeval *vblank_time, 952 struct timeval *vblank_time,
952 unsigned flags, 953 unsigned flags,
953 const struct drm_crtc *refcrtc, 954 const struct drm_crtc *refcrtc,
@@ -968,8 +969,8 @@ static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc
968} 969}
969 970
970/* Modesetting support */ 971/* Modesetting support */
971extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); 972extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
972extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); 973extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
973 974
974 /* Stub support (drm_stub.h) */ 975 /* Stub support (drm_stub.h) */
975extern struct drm_master *drm_master_get(struct drm_master *master); 976extern struct drm_master *drm_master_get(struct drm_master *master);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 574656965126..faaeff7db684 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -745,8 +745,6 @@ struct drm_connector {
745 uint8_t num_h_tile, num_v_tile; 745 uint8_t num_h_tile, num_v_tile;
746 uint8_t tile_h_loc, tile_v_loc; 746 uint8_t tile_h_loc, tile_v_loc;
747 uint16_t tile_h_size, tile_v_size; 747 uint16_t tile_h_size, tile_v_size;
748
749 struct list_head destroy_list;
750}; 748};
751 749
752/** 750/**
@@ -865,7 +863,7 @@ struct drm_plane {
865 863
866 uint32_t possible_crtcs; 864 uint32_t possible_crtcs;
867 uint32_t *format_types; 865 uint32_t *format_types;
868 uint32_t format_count; 866 unsigned int format_count;
869 bool format_default; 867 bool format_default;
870 868
871 struct drm_crtc *crtc; 869 struct drm_crtc *crtc;
@@ -1270,13 +1268,13 @@ extern int drm_universal_plane_init(struct drm_device *dev,
1270 unsigned long possible_crtcs, 1268 unsigned long possible_crtcs,
1271 const struct drm_plane_funcs *funcs, 1269 const struct drm_plane_funcs *funcs,
1272 const uint32_t *formats, 1270 const uint32_t *formats,
1273 uint32_t format_count, 1271 unsigned int format_count,
1274 enum drm_plane_type type); 1272 enum drm_plane_type type);
1275extern int drm_plane_init(struct drm_device *dev, 1273extern int drm_plane_init(struct drm_device *dev,
1276 struct drm_plane *plane, 1274 struct drm_plane *plane,
1277 unsigned long possible_crtcs, 1275 unsigned long possible_crtcs,
1278 const struct drm_plane_funcs *funcs, 1276 const struct drm_plane_funcs *funcs,
1279 const uint32_t *formats, uint32_t format_count, 1277 const uint32_t *formats, unsigned int format_count,
1280 bool is_primary); 1278 bool is_primary);
1281extern void drm_plane_cleanup(struct drm_plane *plane); 1279extern void drm_plane_cleanup(struct drm_plane *plane);
1282extern unsigned int drm_plane_index(struct drm_plane *plane); 1280extern unsigned int drm_plane_index(struct drm_plane *plane);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 6aa59b9c7335..8c52d0ef1fc9 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -578,6 +578,7 @@ u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
578u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], 578u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
579 int lane); 579 int lane);
580 580
581#define DP_BRANCH_OUI_HEADER_SIZE 0xc
581#define DP_RECEIVER_CAP_SIZE 0xf 582#define DP_RECEIVER_CAP_SIZE 0xf
582#define EDP_PSR_RECEIVER_CAP_SIZE 2 583#define EDP_PSR_RECEIVER_CAP_SIZE 2
583 584
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 799050198323..53c53c459b15 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -348,6 +348,25 @@ static inline int drm_eld_mnl(const uint8_t *eld)
348} 348}
349 349
350/** 350/**
351 * drm_eld_sad - Get ELD SAD structures.
352 * @eld: pointer to an eld memory structure with sad_count set
353 */
354static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
355{
356 unsigned int ver, mnl;
357
358 ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
359 if (ver != 2 && ver != 31)
360 return NULL;
361
362 mnl = drm_eld_mnl(eld);
363 if (mnl > 16)
364 return NULL;
365
366 return eld + DRM_ELD_CEA_SAD(mnl, 0);
367}
368
369/**
351 * drm_eld_sad_count - Get ELD SAD count. 370 * drm_eld_sad_count - Get ELD SAD count.
352 * @eld: pointer to an eld memory structure with sad_count set 371 * @eld: pointer to an eld memory structure with sad_count set
353 */ 372 */
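
With drm_eld_sad() in place, callers can locate the CEA Short Audio
Descriptors directly; each SAD is 3 bytes and the count comes from
drm_eld_sad_count(). A hypothetical caller might walk them like this:

#include <drm/drm_edid.h>

static void walk_sads(const uint8_t *eld)
{
	const uint8_t *sad = drm_eld_sad(eld);
	int i, count = drm_eld_sad_count(eld);

	if (!sad)
		return;		/* unsupported ELD version or oversized mnl */

	for (i = 0; i < count; i++, sad += 3) {
		/* sad[0..2]: coding type, channel count, rates, depths */
	}
}
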
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 0dfd94def593..dbab4622b58f 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -122,6 +122,7 @@ struct drm_fb_helper {
122 bool delayed_hotplug; 122 bool delayed_hotplug;
123}; 123};
124 124
125#ifdef CONFIG_DRM_FBDEV_EMULATION
125void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, 126void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
126 const struct drm_fb_helper_funcs *funcs); 127 const struct drm_fb_helper_funcs *funcs);
127int drm_fb_helper_init(struct drm_device *dev, 128int drm_fb_helper_init(struct drm_device *dev,
@@ -136,11 +137,38 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
136 struct fb_info *info); 137 struct fb_info *info);
137 138
138bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper); 139bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
140
141struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
142void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
143void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper);
139void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, 144void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
140 uint32_t fb_width, uint32_t fb_height); 145 uint32_t fb_width, uint32_t fb_height);
141void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, 146void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
142 uint32_t depth); 147 uint32_t depth);
143 148
149void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
150
151ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
152 size_t count, loff_t *ppos);
153ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
154 size_t count, loff_t *ppos);
155
156void drm_fb_helper_sys_fillrect(struct fb_info *info,
157 const struct fb_fillrect *rect);
158void drm_fb_helper_sys_copyarea(struct fb_info *info,
159 const struct fb_copyarea *area);
160void drm_fb_helper_sys_imageblit(struct fb_info *info,
161 const struct fb_image *image);
162
163void drm_fb_helper_cfb_fillrect(struct fb_info *info,
164 const struct fb_fillrect *rect);
165void drm_fb_helper_cfb_copyarea(struct fb_info *info,
166 const struct fb_copyarea *area);
167void drm_fb_helper_cfb_imageblit(struct fb_info *info,
168 const struct fb_image *image);
169
170void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state);
171
144int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); 172int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
145 173
146int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); 174int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
@@ -158,4 +186,188 @@ drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
158int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); 186int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
159int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, 187int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
160 struct drm_connector *connector); 188 struct drm_connector *connector);
189#else
190static inline void drm_fb_helper_prepare(struct drm_device *dev,
191 struct drm_fb_helper *helper,
192 const struct drm_fb_helper_funcs *funcs)
193{
194}
195
196static inline int drm_fb_helper_init(struct drm_device *dev,
197 struct drm_fb_helper *helper, int crtc_count,
198 int max_conn)
199{
200 return 0;
201}
202
203static inline void drm_fb_helper_fini(struct drm_fb_helper *helper)
204{
205}
206
207static inline int drm_fb_helper_blank(int blank, struct fb_info *info)
208{
209 return 0;
210}
211
212static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
213 struct fb_info *info)
214{
215 return 0;
216}
217
218static inline int drm_fb_helper_set_par(struct fb_info *info)
219{
220 return 0;
221}
222
223static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
224 struct fb_info *info)
225{
226 return 0;
227}
228
229static inline bool
230drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
231{
232 return true;
233}
234
235static inline struct fb_info *
236drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
237{
238 return NULL;
239}
240
241static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
242{
243}
244static inline void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
245{
246}
247
248static inline void drm_fb_helper_fill_var(struct fb_info *info,
249 struct drm_fb_helper *fb_helper,
250 uint32_t fb_width, uint32_t fb_height)
251{
252}
253
254static inline void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
255 uint32_t depth)
256{
257}
258
259static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap,
260 struct fb_info *info)
261{
262 return 0;
263}
264
265static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
266{
267}
268
269static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
270 char __user *buf, size_t count,
271 loff_t *ppos)
272{
273 return -ENODEV;
274}
275
276static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info,
277 const char __user *buf,
278 size_t count, loff_t *ppos)
279{
280 return -ENODEV;
281}
282
283static inline void drm_fb_helper_sys_fillrect(struct fb_info *info,
284 const struct fb_fillrect *rect)
285{
286}
287
288static inline void drm_fb_helper_sys_copyarea(struct fb_info *info,
289 const struct fb_copyarea *area)
290{
291}
292
293static inline void drm_fb_helper_sys_imageblit(struct fb_info *info,
294 const struct fb_image *image)
295{
296}
297
298static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info,
299 const struct fb_fillrect *rect)
300{
301}
302
303static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info,
304 const struct fb_copyarea *area)
305{
306}
307
308static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info,
309 const struct fb_image *image)
310{
311}
312
313static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
314 int state)
315{
316}
317
318static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
319{
320 return 0;
321}
322
323static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper,
324 int bpp_sel)
325{
326 return 0;
327}
328
329static inline int
330drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
331{
332 return 0;
333}
334
335static inline int drm_fb_helper_debug_enter(struct fb_info *info)
336{
337 return 0;
338}
339
340static inline int drm_fb_helper_debug_leave(struct fb_info *info)
341{
342 return 0;
343}
344
345static inline struct drm_display_mode *
346drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
347 int width, int height)
348{
349 return NULL;
350}
351
352static inline struct drm_display_mode *
353drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
354 int width, int height)
355{
356 return NULL;
357}
358
359static inline int
360drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
361 struct drm_connector *connector)
362{
363 return 0;
364}
365
366static inline int
367drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
368 struct drm_connector *connector)
369{
370 return 0;
371}
372#endif
161#endif 373#endif
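
The drm_fb_helper.h changes wrap the whole fbdev helper API in
CONFIG_DRM_FBDEV_EMULATION and provide no-op static inline stubs, so
drivers can call the helpers unconditionally and the compiler discards
the calls when emulation is disabled. The pattern in miniature, with a
hypothetical API:

struct foo;

#ifdef CONFIG_FOO
int foo_setup(struct foo *f);
#else
static inline int foo_setup(struct foo *f)
{
	return 0;	/* compiled out: succeed without doing anything */
}
#endif
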
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 70595ff565ba..5dd18bfdf601 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -130,7 +130,6 @@ struct drm_crtc;
130struct drm_plane; 130struct drm_plane;
131 131
132void drm_modeset_lock_all(struct drm_device *dev); 132void drm_modeset_lock_all(struct drm_device *dev);
133int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
134void drm_modeset_unlock_all(struct drm_device *dev); 133void drm_modeset_unlock_all(struct drm_device *dev);
135void drm_modeset_lock_crtc(struct drm_crtc *crtc, 134void drm_modeset_lock_crtc(struct drm_crtc *crtc,
136 struct drm_plane *plane); 135 struct drm_plane *plane);
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 45c39a37f924..8bc073d297db 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -172,6 +172,7 @@
172 {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 172 {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
173 {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 173 {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
174 {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 174 {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
175 {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
175 {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 176 {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
176 {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 177 {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
177 {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 178 {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 96e16283afb9..dda401bf910e 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -43,9 +43,8 @@
43 * planes. 43 * planes.
44 */ 44 */
45 45
46extern int drm_crtc_init(struct drm_device *dev, 46int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
47 struct drm_crtc *crtc, 47 const struct drm_crtc_funcs *funcs);
48 const struct drm_crtc_funcs *funcs);
49 48
50/** 49/**
51 * drm_plane_helper_funcs - helper operations for CRTCs 50 * drm_plane_helper_funcs - helper operations for CRTCs
@@ -79,26 +78,26 @@ static inline void drm_plane_helper_add(struct drm_plane *plane,
79 plane->helper_private = funcs; 78 plane->helper_private = funcs;
80} 79}
81 80
82extern int drm_plane_helper_check_update(struct drm_plane *plane, 81int drm_plane_helper_check_update(struct drm_plane *plane,
83 struct drm_crtc *crtc, 82 struct drm_crtc *crtc,
84 struct drm_framebuffer *fb, 83 struct drm_framebuffer *fb,
85 struct drm_rect *src, 84 struct drm_rect *src,
86 struct drm_rect *dest, 85 struct drm_rect *dest,
87 const struct drm_rect *clip, 86 const struct drm_rect *clip,
88 int min_scale, 87 int min_scale,
89 int max_scale, 88 int max_scale,
90 bool can_position, 89 bool can_position,
91 bool can_update_disabled, 90 bool can_update_disabled,
92 bool *visible); 91 bool *visible);
93extern int drm_primary_helper_update(struct drm_plane *plane, 92int drm_primary_helper_update(struct drm_plane *plane,
94 struct drm_crtc *crtc, 93 struct drm_crtc *crtc,
95 struct drm_framebuffer *fb, 94 struct drm_framebuffer *fb,
96 int crtc_x, int crtc_y, 95 int crtc_x, int crtc_y,
97 unsigned int crtc_w, unsigned int crtc_h, 96 unsigned int crtc_w, unsigned int crtc_h,
98 uint32_t src_x, uint32_t src_y, 97 uint32_t src_x, uint32_t src_y,
99 uint32_t src_w, uint32_t src_h); 98 uint32_t src_w, uint32_t src_h);
100extern int drm_primary_helper_disable(struct drm_plane *plane); 99int drm_primary_helper_disable(struct drm_plane *plane);
101extern void drm_primary_helper_destroy(struct drm_plane *plane); 100void drm_primary_helper_destroy(struct drm_plane *plane);
102extern const struct drm_plane_funcs drm_primary_helper_funcs; 101extern const struct drm_plane_funcs drm_primary_helper_funcs;
103 102
104int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc, 103int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 6c78956aa470..d2992bfa1706 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -385,8 +385,6 @@ enum {
385 SATA_SSP = 0x06, /* Software Settings Preservation */ 385 SATA_SSP = 0x06, /* Software Settings Preservation */
386 SATA_DEVSLP = 0x09, /* Device Sleep */ 386 SATA_DEVSLP = 0x09, /* Device Sleep */
387 387
388 SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
389
390 /* feature values for SET_MAX */ 388 /* feature values for SET_MAX */
391 ATA_SET_MAX_ADDR = 0x00, 389 ATA_SET_MAX_ADDR = 0x00,
392 ATA_SET_MAX_PASSWD = 0x01, 390 ATA_SET_MAX_PASSWD = 0x01,
@@ -530,8 +528,6 @@ struct ata_bmdma_prd {
530#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) 528#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
531#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4)) 529#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
532#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)) 530#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
533#define ata_id_has_ncq_autosense(id) \
534 ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
535 531
536static inline bool ata_id_has_hipm(const u16 *id) 532static inline bool ata_id_has_hipm(const u16 *id)
537{ 533{
@@ -720,20 +716,6 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
720 return false; 716 return false;
721} 717}
722 718
723static inline bool ata_id_has_sense_reporting(const u16 *id)
724{
725 if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
726 return false;
727 return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
728}
729
730static inline bool ata_id_sense_reporting_enabled(const u16 *id)
731{
732 if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
733 return false;
734 return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
735}
736
737/** 719/**
738 * ata_id_major_version - get ATA level of drive 720 * ata_id_major_version - get ATA level of drive
739 * @id: Identify data 721 * @id: Identify data
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 92188b0225bb..51744bcf74ee 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -484,6 +484,7 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
484extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); 484extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
485extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, 485extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
486 void *vcpu_info); 486 void *vcpu_info);
487extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
487#endif 488#endif
488 489
489/* Handling of unhandled and spurious interrupts: */ 490/* Handling of unhandled and spurious interrupts: */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2e872f92dbac..bf6f117fcf4d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1003,6 +1003,34 @@ static inline int page_mapped(struct page *page)
1003} 1003}
1004 1004
1005/* 1005/*
1006 * Return true only if the page has been allocated with
1007 * ALLOC_NO_WATERMARKS and the low watermark was not
1008 * met, implying that the system is under some pressure.
1009 */
1010static inline bool page_is_pfmemalloc(struct page *page)
1011{
1012 /*
1013 * Page index cannot be this large so this must be
1014 * a pfmemalloc page.
1015 */
1016 return page->index == -1UL;
1017}
1018
1019/*
1020 * Only to be called by the page allocator on a freshly allocated
1021 * page.
1022 */
1023static inline void set_page_pfmemalloc(struct page *page)
1024{
1025 page->index = -1UL;
1026}
1027
1028static inline void clear_page_pfmemalloc(struct page *page)
1029{
1030 page->index = 0;
1031}
1032
1033/*
1006 * Different kinds of faults, as returned by handle_mm_fault(). 1034 * Different kinds of faults, as returned by handle_mm_fault().
1007 * Used to decide whether a process gets delivered SIGBUS or 1035 * Used to decide whether a process gets delivered SIGBUS or
1008 * just gets major/minor fault counters bumped up. 1036 * just gets major/minor fault counters bumped up.
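
The mm.h hunk above replaces the dedicated pfmemalloc flag with a sentinel encoded in page->index: only the page allocator writes it, and no valid mapping offset can ever be -1UL, so the test is unambiguous. A minimal user-space sketch of the sentinel idiom (fake_page and the helper names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	unsigned long index;	/* offset within mapping, or sentinel */
};

static inline void set_pfmemalloc(struct fake_page *p)   { p->index = -1UL; }
static inline void clear_pfmemalloc(struct fake_page *p) { p->index = 0; }

static inline bool is_pfmemalloc(const struct fake_page *p)
{
	/* no valid mapping offset can be -1UL, so -1UL means pfmemalloc */
	return p->index == -1UL;
}

int main(void)
{
	struct fake_page pg = { .index = 0 };

	set_pfmemalloc(&pg);
	printf("pfmemalloc=%d\n", is_pfmemalloc(&pg));	/* prints 1 */
	clear_pfmemalloc(&pg);
	printf("pfmemalloc=%d\n", is_pfmemalloc(&pg));	/* prints 0 */
	return 0;
}

The same sentinel is what the mm_types.h and skbuff.h hunks below rely on when they drop the old page->pfmemalloc union member and the mapping-based workaround.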
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0038ac7466fd..15549578d559 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -63,15 +63,6 @@ struct page {
63 union { 63 union {
64 pgoff_t index; /* Our offset within mapping. */ 64 pgoff_t index; /* Our offset within mapping. */
65 void *freelist; /* sl[aou]b first free object */ 65 void *freelist; /* sl[aou]b first free object */
66 bool pfmemalloc; /* If set by the page allocator,
67 * ALLOC_NO_WATERMARKS was set
68 * and the low watermark was not
69 * met implying that the system
70 * is under some pressure. The
71 * caller should try to ensure
72 * this page is only used to
73 * free other pages.
74 */
75 }; 66 };
76 67
77 union { 68 union {
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d6cdd6e87d53..9b88536487e6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1602,20 +1602,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
1602 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1602 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1603 1603
1604 /* 1604 /*
1605 * Propagate page->pfmemalloc to the skb if we can. The problem is 1605 * Propagate page pfmemalloc to the skb if we can. The problem is
1606 * that not all callers have unique ownership of the page. If 1606 * that not all callers have unique ownership of the page but rely
1607 * pfmemalloc is set, we check the mapping as a mapping implies 1607 * on page_is_pfmemalloc doing the right thing(tm).
1608 * page->index is set (index and pfmemalloc share space).
1609 * If it's a valid mapping, we cannot use page->pfmemalloc but we
1610 * do not lose pfmemalloc information as the pages would not be
1611 * allocated using __GFP_MEMALLOC.
1612 */ 1608 */
1613 frag->page.p = page; 1609 frag->page.p = page;
1614 frag->page_offset = off; 1610 frag->page_offset = off;
1615 skb_frag_size_set(frag, size); 1611 skb_frag_size_set(frag, size);
1616 1612
1617 page = compound_head(page); 1613 page = compound_head(page);
1618 if (page->pfmemalloc && !page->mapping) 1614 if (page_is_pfmemalloc(page))
1619 skb->pfmemalloc = true; 1615 skb->pfmemalloc = true;
1620} 1616}
1621 1617
@@ -2263,7 +2259,7 @@ static inline struct page *dev_alloc_page(void)
2263static inline void skb_propagate_pfmemalloc(struct page *page, 2259static inline void skb_propagate_pfmemalloc(struct page *page,
2264 struct sk_buff *skb) 2260 struct sk_buff *skb)
2265{ 2261{
2266 if (page && page->pfmemalloc) 2262 if (page_is_pfmemalloc(page))
2267 skb->pfmemalloc = true; 2263 skb->pfmemalloc = true;
2268} 2264}
2269 2265
@@ -2884,11 +2880,11 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
2884 * 2880 *
2885 * PHY drivers may accept clones of transmitted packets for 2881 * PHY drivers may accept clones of transmitted packets for
2886 * timestamping via their phy_driver.txtstamp method. These drivers 2882 * timestamping via their phy_driver.txtstamp method. These drivers
2887 * must call this function to return the skb back to the stack, with 2883 * must call this function to return the skb back to the stack with a
2888 * or without a timestamp. 2884 * timestamp.
2889 * 2885 *
2890 * @skb: clone of the original outgoing packet 2886 * @skb: clone of the original outgoing packet
2891 * @hwtstamps: hardware time stamps, may be NULL if not available 2887 * @hwtstamps: hardware time stamps
2892 * 2888 *
2893 */ 2889 */
2894void skb_complete_tx_timestamp(struct sk_buff *skb, 2890void skb_complete_tx_timestamp(struct sk_buff *skb,
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 45534da57759..644bdc61c387 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -74,8 +74,6 @@ enum rc_filter_type {
74 * @input_dev: the input child device used to communicate events to userspace 74 * @input_dev: the input child device used to communicate events to userspace
75 * @driver_type: specifies if protocol decoding is done in hardware or software 75 * @driver_type: specifies if protocol decoding is done in hardware or software
76 * @idle: used to keep track of RX state 76 * @idle: used to keep track of RX state
77 * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed
78 * wakeup protocols is the set of all raw encoders
79 * @allowed_protocols: bitmask with the supported RC_BIT_* protocols 77 * @allowed_protocols: bitmask with the supported RC_BIT_* protocols
80 * @enabled_protocols: bitmask with the enabled RC_BIT_* protocols 78 * @enabled_protocols: bitmask with the enabled RC_BIT_* protocols
81 * @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols 79 * @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols
@@ -136,7 +134,6 @@ struct rc_dev {
136 struct input_dev *input_dev; 134 struct input_dev *input_dev;
137 enum rc_driver_type driver_type; 135 enum rc_driver_type driver_type;
138 bool idle; 136 bool idle;
139 bool encode_wakeup;
140 u64 allowed_protocols; 137 u64 allowed_protocols;
141 u64 enabled_protocols; 138 u64 enabled_protocols;
142 u64 allowed_wakeup_protocols; 139 u64 allowed_wakeup_protocols;
@@ -246,7 +243,6 @@ static inline void init_ir_raw_event(struct ir_raw_event *ev)
246#define US_TO_NS(usec) ((usec) * 1000) 243#define US_TO_NS(usec) ((usec) * 1000)
247#define MS_TO_US(msec) ((msec) * 1000) 244#define MS_TO_US(msec) ((msec) * 1000)
248#define MS_TO_NS(msec) ((msec) * 1000 * 1000) 245#define MS_TO_NS(msec) ((msec) * 1000 * 1000)
249#define NS_TO_US(nsec) DIV_ROUND_UP(nsec, 1000L)
250 246
251void ir_raw_event_handle(struct rc_dev *dev); 247void ir_raw_event_handle(struct rc_dev *dev);
252int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev); 248int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
@@ -254,9 +250,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type);
254int ir_raw_event_store_with_filter(struct rc_dev *dev, 250int ir_raw_event_store_with_filter(struct rc_dev *dev,
255 struct ir_raw_event *ev); 251 struct ir_raw_event *ev);
256void ir_raw_event_set_idle(struct rc_dev *dev, bool idle); 252void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
257int ir_raw_encode_scancode(u64 protocols,
258 const struct rc_scancode_filter *scancode,
259 struct ir_raw_event *events, unsigned int max);
260 253
261static inline void ir_raw_event_reset(struct rc_dev *dev) 254static inline void ir_raw_event_reset(struct rc_dev *dev)
262{ 255{
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 22a44c2f5963..c192e1b46cdc 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -139,6 +139,7 @@ enum vb2_io_modes {
139 * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf 139 * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf
140 * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver 140 * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver
141 * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver 141 * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver
142 * @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver
142 * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used 143 * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
143 * in a hardware operation 144 * in a hardware operation
144 * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but 145 * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but
@@ -152,6 +153,7 @@ enum vb2_buffer_state {
152 VB2_BUF_STATE_PREPARING, 153 VB2_BUF_STATE_PREPARING,
153 VB2_BUF_STATE_PREPARED, 154 VB2_BUF_STATE_PREPARED,
154 VB2_BUF_STATE_QUEUED, 155 VB2_BUF_STATE_QUEUED,
156 VB2_BUF_STATE_REQUEUEING,
155 VB2_BUF_STATE_ACTIVE, 157 VB2_BUF_STATE_ACTIVE,
156 VB2_BUF_STATE_DONE, 158 VB2_BUF_STATE_DONE,
157 VB2_BUF_STATE_ERROR, 159 VB2_BUF_STATE_ERROR,
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 4942710ef720..8d1d7fa67ec4 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -28,7 +28,6 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
28 u64 * info_out); 28 u64 * info_out);
29 29
30extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq); 30extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
31extern void scsi_set_sense_information(u8 *buf, u64 info);
32 31
33extern int scsi_ioctl_reset(struct scsi_device *, int __user *); 32extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
34 33
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index 865a141b118b..427bc41df3ae 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -141,6 +141,8 @@ struct snd_soc_tplg_ops {
141 int io_ops_count; 141 int io_ops_count;
142}; 142};
143 143
144#ifdef CONFIG_SND_SOC_TOPOLOGY
145
144/* gets a pointer to data from the firmware block header */ 146/* gets a pointer to data from the firmware block header */
145static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr) 147static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
146{ 148{
@@ -165,4 +167,14 @@ int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
165 const struct snd_soc_tplg_widget_events *events, int num_events, 167 const struct snd_soc_tplg_widget_events *events, int num_events,
166 u16 event_type); 168 u16 event_type);
167 169
170#else
171
172static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
173 u32 index)
174{
175 return 0;
176}
177
178#endif
179
168#endif 180#endif
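
The #ifdef added above follows the usual compile-out stub pattern: when the Kconfig option is off, the header supplies static inline no-ops with the same signatures, so callers build unchanged and need no #ifdefs of their own. A small sketch of the pattern (CONFIG_MY_FEATURE and all names here are made up for illustration):

struct my_comp;		/* opaque to callers */

#ifdef CONFIG_MY_FEATURE
int my_feature_remove(struct my_comp *comp, unsigned int index);
#else
static inline int my_feature_remove(struct my_comp *comp, unsigned int index)
{
	return 0;	/* nothing to undo when the feature is compiled out */
}
#endif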
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index c472bedbe38e..05b204954d16 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -64,6 +64,7 @@
64#define DRM_VMW_GB_SURFACE_CREATE 23 64#define DRM_VMW_GB_SURFACE_CREATE 23
65#define DRM_VMW_GB_SURFACE_REF 24 65#define DRM_VMW_GB_SURFACE_REF 24
66#define DRM_VMW_SYNCCPU 25 66#define DRM_VMW_SYNCCPU 25
67#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
67 68
68/*************************************************************************/ 69/*************************************************************************/
69/** 70/**
@@ -88,6 +89,8 @@
88#define DRM_VMW_PARAM_3D_CAPS_SIZE 8 89#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
89#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 90#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
90#define DRM_VMW_PARAM_MAX_MOB_SIZE 10 91#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
92#define DRM_VMW_PARAM_SCREEN_TARGET 11
93#define DRM_VMW_PARAM_DX 12
91 94
92/** 95/**
93 * enum drm_vmw_handle_type - handle type for ref ioctls 96 * enum drm_vmw_handle_type - handle type for ref ioctls
@@ -296,7 +299,7 @@ union drm_vmw_surface_reference_arg {
296 * Argument to the DRM_VMW_EXECBUF Ioctl. 299 * Argument to the DRM_VMW_EXECBUF Ioctl.
297 */ 300 */
298 301
299#define DRM_VMW_EXECBUF_VERSION 1 302#define DRM_VMW_EXECBUF_VERSION 2
300 303
301struct drm_vmw_execbuf_arg { 304struct drm_vmw_execbuf_arg {
302 uint64_t commands; 305 uint64_t commands;
@@ -305,6 +308,8 @@ struct drm_vmw_execbuf_arg {
305 uint64_t fence_rep; 308 uint64_t fence_rep;
306 uint32_t version; 309 uint32_t version;
307 uint32_t flags; 310 uint32_t flags;
311 uint32_t context_handle;
312 uint32_t pad64;
308}; 313};
309 314
310/** 315/**
@@ -825,7 +830,6 @@ struct drm_vmw_update_layout_arg {
825enum drm_vmw_shader_type { 830enum drm_vmw_shader_type {
826 drm_vmw_shader_type_vs = 0, 831 drm_vmw_shader_type_vs = 0,
827 drm_vmw_shader_type_ps, 832 drm_vmw_shader_type_ps,
828 drm_vmw_shader_type_gs
829}; 833};
830 834
831 835
@@ -907,6 +911,8 @@ enum drm_vmw_surface_flags {
907 * @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID 911 * @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID
908 * if none. 912 * if none.
909 * @base_size Size of the base mip level for all faces. 913 * @base_size Size of the base mip level for all faces.
914 * @array_size Must be zero for non-DX hardware, and if non-zero
915 * svga3d_flags must have proper bind flags setup.
910 * 916 *
911 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl. 917 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
912 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. 918 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
@@ -919,7 +925,7 @@ struct drm_vmw_gb_surface_create_req {
919 uint32_t multisample_count; 925 uint32_t multisample_count;
920 uint32_t autogen_filter; 926 uint32_t autogen_filter;
921 uint32_t buffer_handle; 927 uint32_t buffer_handle;
922 uint32_t pad64; 928 uint32_t array_size;
923 struct drm_vmw_size base_size; 929 struct drm_vmw_size base_size;
924}; 930};
925 931
@@ -1059,4 +1065,28 @@ struct drm_vmw_synccpu_arg {
1059 uint32_t pad64; 1065 uint32_t pad64;
1060}; 1066};
1061 1067
1068/*************************************************************************/
1069/**
1070 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
1071 *
1072 * Allocates a device unique context id, and queues a create context command
1073 * for the host. Does not wait for host completion.
1074 */
1075enum drm_vmw_extended_context {
1076 drm_vmw_context_legacy,
1077 drm_vmw_context_dx
1078};
1079
1080/**
1081 * union drm_vmw_extended_context_arg
1082 *
1083 * @req: Context type.
1084 * @rep: Context identifier.
1085 *
1086 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
1087 */
1088union drm_vmw_extended_context_arg {
1089 enum drm_vmw_extended_context req;
1090 struct drm_vmw_context_arg rep;
1091};
1062#endif 1092#endif
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 51b8066a223b..247c50bd60f0 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -18,6 +18,12 @@
18#include <linux/types.h> 18#include <linux/types.h>
19#include <sound/asound.h> 19#include <sound/asound.h>
20 20
21#ifndef __KERNEL__
22#error This API is an early revision and not enabled in the current
23#error kernel release, it will be enabled in a future kernel version
24#error with incompatible changes to what is here.
25#endif
26
21/* 27/*
22 * Maximum number of channels topology kcontrol can represent. 28 * Maximum number of channels topology kcontrol can represent.
23 */ 29 */
diff --git a/ipc/sem.c b/ipc/sem.c
index bc3d530cb23e..b471e5a3863d 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
253} 253}
254 254
255/* 255/*
256 * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
257 * are only control barriers.
258 * The code must pair with spin_unlock(&sem->lock) or
259 * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
260 *
261 * smp_rmb() is sufficient, as writes cannot pass the control barrier.
262 */
263#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
264
265/*
256 * Wait until all currently ongoing simple ops have completed. 266 * Wait until all currently ongoing simple ops have completed.
257 * Caller must own sem_perm.lock. 267 * Caller must own sem_perm.lock.
258 * New simple ops cannot start, because simple ops first check 268 * New simple ops cannot start, because simple ops first check
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
275 sem = sma->sem_base + i; 285 sem = sma->sem_base + i;
276 spin_unlock_wait(&sem->lock); 286 spin_unlock_wait(&sem->lock);
277 } 287 }
288 ipc_smp_acquire__after_spin_is_unlocked();
278} 289}
279 290
280/* 291/*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
327 /* Then check that the global lock is free */ 338 /* Then check that the global lock is free */
328 if (!spin_is_locked(&sma->sem_perm.lock)) { 339 if (!spin_is_locked(&sma->sem_perm.lock)) {
329 /* 340 /*
330 * The ipc object lock check must be visible on all 341 * We need a memory barrier with acquire semantics,
331 * cores before rechecking the complex count. Otherwise 342 * otherwise we can race with another thread that does:
332 * we can race with another thread that does:
333 * complex_count++; 343 * complex_count++;
334 * spin_unlock(sem_perm.lock); 344 * spin_unlock(sem_perm.lock);
335 */ 345 */
336 smp_rmb(); 346 ipc_smp_acquire__after_spin_is_unlocked();
337 347
338 /* 348 /*
339 * Now repeat the test of complex_count: 349 * Now repeat the test of complex_count:
@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
2074 rcu_read_lock(); 2084 rcu_read_lock();
2075 un = list_entry_rcu(ulp->list_proc.next, 2085 un = list_entry_rcu(ulp->list_proc.next,
2076 struct sem_undo, list_proc); 2086 struct sem_undo, list_proc);
2077 if (&un->list_proc == &ulp->list_proc) 2087 if (&un->list_proc == &ulp->list_proc) {
2078 semid = -1; 2088 /*
2079 else 2089 * We must wait for freeary() before freeing this ulp,
2080 semid = un->semid; 2090 * in case we raced with last sem_undo. There is a small
2091 * possibility that we exit while freeary() didn't
2092 * finish unlocking sem_undo_list.
2093 */
2094 spin_unlock_wait(&ulp->lock);
2095 rcu_read_unlock();
2096 break;
2097 }
2098 spin_lock(&ulp->lock);
2099 semid = un->semid;
2100 spin_unlock(&ulp->lock);
2081 2101
2102 /* exit_sem raced with IPC_RMID, nothing to do */
2082 if (semid == -1) { 2103 if (semid == -1) {
2083 rcu_read_unlock(); 2104 rcu_read_unlock();
2084 break; 2105 continue;
2085 } 2106 }
2086 2107
2087 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid); 2108 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2088 /* exit_sem raced with IPC_RMID, nothing to do */ 2109 /* exit_sem raced with IPC_RMID, nothing to do */
2089 if (IS_ERR(sma)) { 2110 if (IS_ERR(sma)) {
2090 rcu_read_unlock(); 2111 rcu_read_unlock();
@@ -2112,9 +2133,11 @@ void exit_sem(struct task_struct *tsk)
2112 ipc_assert_locked_object(&sma->sem_perm); 2133 ipc_assert_locked_object(&sma->sem_perm);
2113 list_del(&un->list_id); 2134 list_del(&un->list_id);
2114 2135
2115 spin_lock(&ulp->lock); 2136 /* we are the last process using this ulp, so acquiring ulp->lock
2137 * isn't required. Besides that, we are also protected against
2138 * IPC_RMID as we hold sma->sem_perm lock now
2139 */
2116 list_del_rcu(&un->list_proc); 2140 list_del_rcu(&un->list_proc);
2117 spin_unlock(&ulp->lock);
2118 2141
2119 /* perform adjustments registered in un */ 2142 /* perform adjustments registered in un */
2120 for (i = 0; i < sma->sem_nsems; i++) { 2143 for (i = 0; i < sma->sem_nsems; i++) {
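
Both sem.c hunks above hang off the same observation: seeing a spinlock as unlocked (via spin_unlock_wait() or !spin_is_locked()) is only a control dependency, not an acquire, so a read that must happen after the former holder's writes needs an explicit barrier, here smp_rmb() wrapped as ipc_smp_acquire__after_spin_is_unlocked(). A rough C11 rendering of the pairing (illustrative, with a fence standing in for the kernel primitive):

#include <stdatomic.h>
#include <stdbool.h>

extern atomic_bool locked;		/* the global lock word */
extern atomic_int  complex_count;	/* written under the lock */

static int recheck_complex_count(void)
{
	/* observing the lock as free is only a control dependency... */
	while (atomic_load_explicit(&locked, memory_order_relaxed))
		;

	/* ...so promote it to an acquire before reading data that the
	 * previous holder wrote before its release store */
	atomic_thread_fence(memory_order_acquire);

	return atomic_load_explicit(&complex_count, memory_order_relaxed);
}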
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index ee14e3a35a29..f0acff0f66c9 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1223,7 +1223,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1223 spin_unlock_irq(&callback_lock); 1223 spin_unlock_irq(&callback_lock);
1224 1224
1225 /* use trialcs->mems_allowed as a temp variable */ 1225 /* use trialcs->mems_allowed as a temp variable */
1226 update_nodemasks_hier(cs, &cs->mems_allowed); 1226 update_nodemasks_hier(cs, &trialcs->mems_allowed);
1227done: 1227done:
1228 return retval; 1228 return retval;
1229} 1229}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d3dae3419b99..e6feb5114134 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
1868 1868
1869 perf_pmu_disable(event->pmu); 1869 perf_pmu_disable(event->pmu);
1870 1870
1871 event->tstamp_running += tstamp - event->tstamp_stopped;
1872
1873 perf_set_shadow_time(event, ctx, tstamp); 1871 perf_set_shadow_time(event, ctx, tstamp);
1874 1872
1875 perf_log_itrace_start(event); 1873 perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
1881 goto out; 1879 goto out;
1882 } 1880 }
1883 1881
1882 event->tstamp_running += tstamp - event->tstamp_stopped;
1883
1884 if (!is_software_event(event)) 1884 if (!is_software_event(event))
1885 cpuctx->active_oncpu++; 1885 cpuctx->active_oncpu++;
1886 if (!ctx->nr_active++) 1886 if (!ctx->nr_active++)
@@ -3958,28 +3958,21 @@ static void perf_event_for_each(struct perf_event *event,
3958 perf_event_for_each_child(sibling, func); 3958 perf_event_for_each_child(sibling, func);
3959} 3959}
3960 3960
3961static int perf_event_period(struct perf_event *event, u64 __user *arg) 3961struct period_event {
3962{ 3962 struct perf_event *event;
3963 struct perf_event_context *ctx = event->ctx;
3964 int ret = 0, active;
3965 u64 value; 3963 u64 value;
3964};
3966 3965
3967 if (!is_sampling_event(event)) 3966static int __perf_event_period(void *info)
3968 return -EINVAL; 3967{
3969 3968 struct period_event *pe = info;
3970 if (copy_from_user(&value, arg, sizeof(value))) 3969 struct perf_event *event = pe->event;
3971 return -EFAULT; 3970 struct perf_event_context *ctx = event->ctx;
3972 3971 u64 value = pe->value;
3973 if (!value) 3972 bool active;
3974 return -EINVAL;
3975 3973
3976 raw_spin_lock_irq(&ctx->lock); 3974 raw_spin_lock(&ctx->lock);
3977 if (event->attr.freq) { 3975 if (event->attr.freq) {
3978 if (value > sysctl_perf_event_sample_rate) {
3979 ret = -EINVAL;
3980 goto unlock;
3981 }
3982
3983 event->attr.sample_freq = value; 3976 event->attr.sample_freq = value;
3984 } else { 3977 } else {
3985 event->attr.sample_period = value; 3978 event->attr.sample_period = value;
@@ -3998,11 +3991,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
3998 event->pmu->start(event, PERF_EF_RELOAD); 3991 event->pmu->start(event, PERF_EF_RELOAD);
3999 perf_pmu_enable(ctx->pmu); 3992 perf_pmu_enable(ctx->pmu);
4000 } 3993 }
3994 raw_spin_unlock(&ctx->lock);
4001 3995
4002unlock: 3996 return 0;
3997}
3998
3999static int perf_event_period(struct perf_event *event, u64 __user *arg)
4000{
4001 struct period_event pe = { .event = event, };
4002 struct perf_event_context *ctx = event->ctx;
4003 struct task_struct *task;
4004 u64 value;
4005
4006 if (!is_sampling_event(event))
4007 return -EINVAL;
4008
4009 if (copy_from_user(&value, arg, sizeof(value)))
4010 return -EFAULT;
4011
4012 if (!value)
4013 return -EINVAL;
4014
4015 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4016 return -EINVAL;
4017
4018 task = ctx->task;
4019 pe.value = value;
4020
4021 if (!task) {
4022 cpu_function_call(event->cpu, __perf_event_period, &pe);
4023 return 0;
4024 }
4025
4026retry:
4027 if (!task_function_call(task, __perf_event_period, &pe))
4028 return 0;
4029
4030 raw_spin_lock_irq(&ctx->lock);
4031 if (ctx->is_active) {
4032 raw_spin_unlock_irq(&ctx->lock);
4033 task = ctx->task;
4034 goto retry;
4035 }
4036
4037 __perf_event_period(&pe);
4003 raw_spin_unlock_irq(&ctx->lock); 4038 raw_spin_unlock_irq(&ctx->lock);
4004 4039
4005 return ret; 4040 return 0;
4006} 4041}
4007 4042
4008static const struct file_operations perf_fops; 4043static const struct file_operations perf_fops;
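
The rewrite above moves the period update off the caller's CPU and instead runs __perf_event_period() on the CPU that owns the event, retrying when the context flips active between the failed cross-call and the lock acquisition. A stripped-down sketch of that retry shape (ctx and remote_call are stand-ins, not the perf API):

#include <stdbool.h>

struct ctx { bool is_active; /* ... */ };

extern int  remote_call(struct ctx *c, int (*fn)(void *), void *arg);
extern void ctx_lock(struct ctx *c);
extern void ctx_unlock(struct ctx *c);

static int apply_update(void *arg)
{
	/* runs on the CPU that owns the context */
	return 0;
}

static void update_ctx(struct ctx *c, void *arg)
{
retry:
	if (!remote_call(c, apply_update, arg))
		return;			/* ran remotely, done */

	ctx_lock(c);
	if (c->is_active) {		/* became active again: redo the call */
		ctx_unlock(c);
		goto retry;
	}
	apply_update(arg);		/* inactive, safe to update in place */
	ctx_unlock(c);
}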
@@ -4740,12 +4775,20 @@ static const struct file_operations perf_fops = {
4740 * to user-space before waking everybody up. 4775 * to user-space before waking everybody up.
4741 */ 4776 */
4742 4777
4778static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
4779{
4780 /* only the parent has fasync state */
4781 if (event->parent)
4782 event = event->parent;
4783 return &event->fasync;
4784}
4785
4743void perf_event_wakeup(struct perf_event *event) 4786void perf_event_wakeup(struct perf_event *event)
4744{ 4787{
4745 ring_buffer_wakeup(event); 4788 ring_buffer_wakeup(event);
4746 4789
4747 if (event->pending_kill) { 4790 if (event->pending_kill) {
4748 kill_fasync(&event->fasync, SIGIO, event->pending_kill); 4791 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
4749 event->pending_kill = 0; 4792 event->pending_kill = 0;
4750 } 4793 }
4751} 4794}
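
perf_event_fasync() above encodes the rule that inherited per-child events share the fasync state of the event they were cloned from, so both wakeup paths must resolve to the parent first. The idiom in isolation (illustrative types):

struct evt { struct evt *parent; int fasync; };

static int *evt_fasync(struct evt *e)
{
	if (e->parent)		/* only the hierarchy root owns fasync state */
		e = e->parent;
	return &e->fasync;
}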
@@ -6124,7 +6167,7 @@ static int __perf_event_overflow(struct perf_event *event,
6124 else 6167 else
6125 perf_event_output(event, data, regs); 6168 perf_event_output(event, data, regs);
6126 6169
6127 if (event->fasync && event->pending_kill) { 6170 if (*perf_event_fasync(event) && event->pending_kill) {
6128 event->pending_wakeup = 1; 6171 event->pending_wakeup = 1;
6129 irq_work_queue(&event->pending); 6172 irq_work_queue(&event->pending);
6130 } 6173 }
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index b2be01b1aa9d..c8aa3f75bc4d 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
559 rb->aux_priv = NULL; 559 rb->aux_priv = NULL;
560 } 560 }
561 561
562 for (pg = 0; pg < rb->aux_nr_pages; pg++) 562 if (rb->aux_nr_pages) {
563 rb_free_aux_page(rb, pg); 563 for (pg = 0; pg < rb->aux_nr_pages; pg++)
564 rb_free_aux_page(rb, pg);
564 565
565 kfree(rb->aux_pages); 566 kfree(rb->aux_pages);
566 rb->aux_nr_pages = 0; 567 rb->aux_nr_pages = 0;
568 }
567} 569}
568 570
569void rb_free_aux(struct ring_buffer *rb) 571void rb_free_aux(struct ring_buffer *rb)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 27f4332c7f84..ae216824e8ca 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -985,6 +985,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
985} 985}
986 986
987/** 987/**
988 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
989 * @data: Pointer to interrupt specific data
990 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
991 *
992 * Conditional, as the underlying parent chip might not implement it.
993 */
994int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
995{
996 data = data->parent_data;
997
998 if (data->chip->irq_set_type)
999 return data->chip->irq_set_type(data, type);
1000
1001 return -ENOSYS;
1002}
1003
1004/**
988 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware 1005 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
989 * @data: Pointer to interrupt specific data 1006 * @data: Pointer to interrupt specific data
990 * 1007 *
@@ -997,7 +1014,7 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data)
997 if (data->chip && data->chip->irq_retrigger) 1014 if (data->chip && data->chip->irq_retrigger)
998 return data->chip->irq_retrigger(data); 1015 return data->chip->irq_retrigger(data);
999 1016
1000 return -ENOSYS; 1017 return 0;
1001} 1018}
1002 1019
1003/** 1020/**
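
irq_chip_set_type_parent() is the standard one-level delegation helper for hierarchical irqchips: step to the parent irq_data and forward the op if the parent chip implements it, otherwise report -ENOSYS. (The retrigger hunk goes the other way: returning 0 instead of -ENOSYS tells the core the interrupt was not retriggered in hardware, so it can resend in software.) A sketch of the delegation idiom with made-up types:

#include <errno.h>
#include <stddef.h>

struct irq_node;

struct chip_ops {
	int (*set_type)(struct irq_node *d, unsigned int type);
};

struct irq_node {
	struct irq_node		*parent;
	const struct chip_ops	*ops;
};

static int set_type_parent(struct irq_node *d, unsigned int type)
{
	d = d->parent;				/* delegate one level up */
	if (d && d->ops && d->ops->set_type)
		return d->ops->set_type(d, type);
	return -ENOSYS;				/* parent cannot do it */
}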
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 04ab18151cc8..df19ae4debd0 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -4,6 +4,7 @@
4 4
5#include <linux/hash.h> 5#include <linux/hash.h>
6#include <linux/bootmem.h> 6#include <linux/bootmem.h>
7#include <linux/debug_locks.h>
7 8
8/* 9/*
9 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead 10 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
@@ -286,15 +287,23 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
286{ 287{
287 struct __qspinlock *l = (void *)lock; 288 struct __qspinlock *l = (void *)lock;
288 struct pv_node *node; 289 struct pv_node *node;
290 u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
289 291
290 /* 292 /*
291 * We must not unlock if SLOW, because in that case we must first 293 * We must not unlock if SLOW, because in that case we must first
292 * unhash. Otherwise it would be possible to have multiple @lock 294 * unhash. Otherwise it would be possible to have multiple @lock
293 * entries, which would be BAD. 295 * entries, which would be BAD.
294 */ 296 */
295 if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL)) 297 if (likely(lockval == _Q_LOCKED_VAL))
296 return; 298 return;
297 299
300 if (unlikely(lockval != _Q_SLOW_VAL)) {
301 if (debug_locks_silent)
302 return;
303 WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
304 return;
305 }
306
298 /* 307 /*
299 * Since the above failed to release, this must be the SLOW path. 308 * Since the above failed to release, this must be the SLOW path.
300 * Therefore start by looking up the blocked node and unhashing it. 309 * Therefore start by looking up the blocked node and unhashing it.
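
The qspinlock change above captures the release cmpxchg result in a local so the unlock path can classify what it actually saw: _Q_LOCKED_VAL means the fast unlock worked, _Q_SLOW_VAL means take the slow path, and anything else is corruption worth a WARN with the observed value. The read-once-then-classify shape, modeled with C11 atomics (constants and names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define LOCKED	1
#define SLOW	3

/* returns nonzero when the fast path released the lock */
static int try_fast_unlock(atomic_uchar *locked)
{
	unsigned char seen = LOCKED;

	/* one atomic op: either releases, or tells us exactly why not */
	if (atomic_compare_exchange_strong(locked, &seen, 0))
		return 1;

	if (seen != SLOW) {	/* neither LOCKED nor SLOW: corrupted */
		fprintf(stderr, "lock corrupted: 0x%x\n", seen);
		return 0;
	}
	return 0;		/* SLOW: caller runs the slow path */
}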
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 5e097fa9faf7..84190f02b521 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -807,8 +807,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
807 spin_unlock(&base->lock); 807 spin_unlock(&base->lock);
808 base = new_base; 808 base = new_base;
809 spin_lock(&base->lock); 809 spin_lock(&base->lock);
810 timer->flags &= ~TIMER_BASEMASK; 810 WRITE_ONCE(timer->flags,
811 timer->flags |= base->cpu; 811 (timer->flags & ~TIMER_BASEMASK) | base->cpu);
812 } 812 }
813 } 813 }
814 814
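
The timer.c change replaces a two-step clear-then-set of timer->flags with a single WRITE_ONCE of the final value, so a lockless reader can never observe the window where the base bits are cleared but the new cpu is not yet set. Modeled with a relaxed C11 store standing in for WRITE_ONCE (the mask value is illustrative):

#include <stdatomic.h>

#define BASEMASK 0x3ffu

static void migrate_timer_base(atomic_uint *flags, unsigned int cpu)
{
	unsigned int old = atomic_load_explicit(flags, memory_order_relaxed);

	/* publish the combined update in one store: no torn intermediate */
	atomic_store_explicit(flags, (old & ~BASEMASK) | cpu,
			      memory_order_relaxed);
}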
diff --git a/mm/cma.h b/mm/cma.h
index 1132d733556d..17c75a4246c8 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -16,7 +16,7 @@ struct cma {
16extern struct cma cma_areas[MAX_CMA_AREAS]; 16extern struct cma cma_areas[MAX_CMA_AREAS];
17extern unsigned cma_area_count; 17extern unsigned cma_area_count;
18 18
19static unsigned long cma_bitmap_maxno(struct cma *cma) 19static inline unsigned long cma_bitmap_maxno(struct cma *cma)
20{ 20{
21 return cma->count >> cma->order_per_bit; 21 return cma->count >> cma->order_per_bit;
22} 22}
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 6c513a63ea84..7b28e9cdf1c7 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -2,7 +2,7 @@
2 * This file contains shadow memory manipulation code. 2 * This file contains shadow memory manipulation code.
3 * 3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd. 4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com> 5 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
6 * 6 *
7 * Some of code borrowed from https://github.com/xairy/linux by 7 * Some of code borrowed from https://github.com/xairy/linux by
8 * Andrey Konovalov <adech.fo@gmail.com> 8 * Andrey Konovalov <adech.fo@gmail.com>
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 680ceedf810a..e07c94fbd0ac 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -2,7 +2,7 @@
2 * This file contains error reporting code. 2 * This file contains error reporting code.
3 * 3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd. 4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com> 5 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
6 * 6 *
7 * Some of code borrowed from https://github.com/xairy/linux by 7 * Some of code borrowed from https://github.com/xairy/linux by
8 * Andrey Konovalov <adech.fo@gmail.com> 8 * Andrey Konovalov <adech.fo@gmail.com>
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index ea5a93659488..1f4446a90cef 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1146,8 +1146,11 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1146 } 1146 }
1147 1147
1148 if (!PageHuge(p) && PageTransHuge(hpage)) { 1148 if (!PageHuge(p) && PageTransHuge(hpage)) {
1149 if (unlikely(split_huge_page(hpage))) { 1149 if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
1150 pr_err("MCE: %#lx: thp split failed\n", pfn); 1150 if (!PageAnon(hpage))
1151 pr_err("MCE: %#lx: non anonymous thp\n", pfn);
1152 else
1153 pr_err("MCE: %#lx: thp split failed\n", pfn);
1151 if (TestClearPageHWPoison(p)) 1154 if (TestClearPageHWPoison(p))
1152 atomic_long_sub(nr_pages, &num_poisoned_pages); 1155 atomic_long_sub(nr_pages, &num_poisoned_pages);
1153 put_page(p); 1156 put_page(p);
@@ -1538,6 +1541,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
1538 */ 1541 */
1539 ret = __get_any_page(page, pfn, 0); 1542 ret = __get_any_page(page, pfn, 0);
1540 if (!PageLRU(page)) { 1543 if (!PageLRU(page)) {
1544 /* Drop page reference which is from __get_any_page() */
1545 put_page(page);
1541 pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", 1546 pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
1542 pfn, page->flags); 1547 pfn, page->flags);
1543 return -EIO; 1548 return -EIO;
@@ -1567,13 +1572,12 @@ static int soft_offline_huge_page(struct page *page, int flags)
1567 unlock_page(hpage); 1572 unlock_page(hpage);
1568 1573
1569 ret = isolate_huge_page(hpage, &pagelist); 1574 ret = isolate_huge_page(hpage, &pagelist);
1570 if (ret) { 1575 /*
1571 /* 1576 * get_any_page() and isolate_huge_page() takes a refcount each,
1572 * get_any_page() and isolate_huge_page() takes a refcount each, 1577 * so need to drop one here.
1573 * so need to drop one here. 1578 */
1574 */ 1579 put_page(hpage);
1575 put_page(hpage); 1580 if (!ret) {
1576 } else {
1577 pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn); 1581 pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
1578 return -EBUSY; 1582 return -EBUSY;
1579 } 1583 }
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 003dbe4b060d..6da82bcb0a8b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1277,6 +1277,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
1277 1277
1278 /* create new memmap entry */ 1278 /* create new memmap entry */
1279 firmware_map_add_hotplug(start, start + size, "System RAM"); 1279 firmware_map_add_hotplug(start, start + size, "System RAM");
1280 memblock_add_node(start, size, nid);
1280 1281
1281 goto out; 1282 goto out;
1282 1283
@@ -2013,6 +2014,8 @@ void __ref remove_memory(int nid, u64 start, u64 size)
2013 2014
2014 /* remove memmap entry */ 2015 /* remove memmap entry */
2015 firmware_map_remove(start, start + size, "System RAM"); 2016 firmware_map_remove(start, start + size, "System RAM");
2017 memblock_free(start, size);
2018 memblock_remove(start, size);
2016 2019
2017 arch_remove_memory(start, size); 2020 arch_remove_memory(start, size);
2018 2021
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index beda41710802..5b5240b7f642 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1343,12 +1343,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1343 set_page_owner(page, order, gfp_flags); 1343 set_page_owner(page, order, gfp_flags);
1344 1344
1345 /* 1345 /*
1346 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to 1346 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1347 * allocate the page. The expectation is that the caller is taking 1347 * allocate the page. The expectation is that the caller is taking
1348 * steps that will free more memory. The caller should avoid the page 1348 * steps that will free more memory. The caller should avoid the page
1349 * being used for !PFMEMALLOC purposes. 1349 * being used for !PFMEMALLOC purposes.
1350 */ 1350 */
1351 page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS); 1351 if (alloc_flags & ALLOC_NO_WATERMARKS)
1352 set_page_pfmemalloc(page);
1353 else
1354 clear_page_pfmemalloc(page);
1352 1355
1353 return 0; 1356 return 0;
1354} 1357}
@@ -3345,7 +3348,7 @@ refill:
3345 atomic_add(size - 1, &page->_count); 3348 atomic_add(size - 1, &page->_count);
3346 3349
3347 /* reset page count bias and offset to start of new frag */ 3350 /* reset page count bias and offset to start of new frag */
3348 nc->pfmemalloc = page->pfmemalloc; 3351 nc->pfmemalloc = page_is_pfmemalloc(page);
3349 nc->pagecnt_bias = size; 3352 nc->pagecnt_bias = size;
3350 nc->offset = size; 3353 nc->offset = size;
3351 } 3354 }
@@ -5060,6 +5063,10 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
5060{ 5063{
5061 unsigned long zone_start_pfn, zone_end_pfn; 5064 unsigned long zone_start_pfn, zone_end_pfn;
5062 5065
5066 /* When hotadding a new node, the node should be empty */
5067 if (!node_start_pfn && !node_end_pfn)
5068 return 0;
5069
5063 /* Get the start and end of the zone */ 5070 /* Get the start and end of the zone */
5064 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 5071 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5065 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 5072 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
@@ -5123,6 +5130,10 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
5123 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 5130 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
5124 unsigned long zone_start_pfn, zone_end_pfn; 5131 unsigned long zone_start_pfn, zone_end_pfn;
5125 5132
5133 /* When hotadding a new node, the node should be empty */
5134 if (!node_start_pfn && !node_end_pfn)
5135 return 0;
5136
5126 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 5137 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5127 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 5138 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
5128 5139
diff --git a/mm/slab.c b/mm/slab.c
index 200e22412a16..bbd0b47dc6a9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1603,7 +1603,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1603 } 1603 }
1604 1604
1605 /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ 1605 /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1606 if (unlikely(page->pfmemalloc)) 1606 if (page_is_pfmemalloc(page))
1607 pfmemalloc_active = true; 1607 pfmemalloc_active = true;
1608 1608
1609 nr_pages = (1 << cachep->gfporder); 1609 nr_pages = (1 << cachep->gfporder);
@@ -1614,7 +1614,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1614 add_zone_page_state(page_zone(page), 1614 add_zone_page_state(page_zone(page),
1615 NR_SLAB_UNRECLAIMABLE, nr_pages); 1615 NR_SLAB_UNRECLAIMABLE, nr_pages);
1616 __SetPageSlab(page); 1616 __SetPageSlab(page);
1617 if (page->pfmemalloc) 1617 if (page_is_pfmemalloc(page))
1618 SetPageSlabPfmemalloc(page); 1618 SetPageSlabPfmemalloc(page);
1619 1619
1620 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { 1620 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
diff --git a/mm/slub.c b/mm/slub.c
index 816df0016555..f68c0e50f3c0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1427 inc_slabs_node(s, page_to_nid(page), page->objects); 1427 inc_slabs_node(s, page_to_nid(page), page->objects);
1428 page->slab_cache = s; 1428 page->slab_cache = s;
1429 __SetPageSlab(page); 1429 __SetPageSlab(page);
1430 if (page->pfmemalloc) 1430 if (page_is_pfmemalloc(page))
1431 SetPageSlabPfmemalloc(page); 1431 SetPageSlabPfmemalloc(page);
1432 1432
1433 start = page_address(page); 1433 start = page_address(page);
diff --git a/net/9p/client.c b/net/9p/client.c
index 498454b3c06c..ea79ee9a7348 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1541,6 +1541,7 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
1541 struct p9_client *clnt = fid->clnt; 1541 struct p9_client *clnt = fid->clnt;
1542 struct p9_req_t *req; 1542 struct p9_req_t *req;
1543 int total = 0; 1543 int total = 0;
1544 *err = 0;
1544 1545
1545 p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", 1546 p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
1546 fid->fid, (unsigned long long) offset, (int)iov_iter_count(to)); 1547 fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
@@ -1620,6 +1621,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
1620 struct p9_client *clnt = fid->clnt; 1621 struct p9_client *clnt = fid->clnt;
1621 struct p9_req_t *req; 1622 struct p9_req_t *req;
1622 int total = 0; 1623 int total = 0;
1624 *err = 0;
1623 1625
1624 p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n", 1626 p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
1625 fid->fid, (unsigned long long) offset, 1627 fid->fid, (unsigned long long) offset,
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index fb54e6aed096..6d0b471eede8 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1138,6 +1138,9 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
1138 * @bat_priv: the bat priv with all the soft interface information 1138 * @bat_priv: the bat priv with all the soft interface information
1139 * @skb: packet to check 1139 * @skb: packet to check
1140 * @hdr_size: size of the encapsulation header 1140 * @hdr_size: size of the encapsulation header
1141 *
1142 * Returns true if the packet was snooped and consumed by DAT. False if the
1143 * packet has to be delivered to the interface.
1141 */ 1144 */
1142bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, 1145bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
1143 struct sk_buff *skb, int hdr_size) 1146 struct sk_buff *skb, int hdr_size)
@@ -1145,7 +1148,7 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
1145 uint16_t type; 1148 uint16_t type;
1146 __be32 ip_src, ip_dst; 1149 __be32 ip_src, ip_dst;
1147 uint8_t *hw_src, *hw_dst; 1150 uint8_t *hw_src, *hw_dst;
1148 bool ret = false; 1151 bool dropped = false;
1149 unsigned short vid; 1152 unsigned short vid;
1150 1153
1151 if (!atomic_read(&bat_priv->distributed_arp_table)) 1154 if (!atomic_read(&bat_priv->distributed_arp_table))
@@ -1174,12 +1177,17 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
1174 /* if this REPLY is directed to a client of mine, let's deliver the 1177 /* if this REPLY is directed to a client of mine, let's deliver the
1175 * packet to the interface 1178 * packet to the interface
1176 */ 1179 */
1177 ret = !batadv_is_my_client(bat_priv, hw_dst, vid); 1180 dropped = !batadv_is_my_client(bat_priv, hw_dst, vid);
1181
1182 /* if this REPLY is sent on behalf of a client of mine, let's drop the
1183 * packet because the client will reply by itself
1184 */
1185 dropped |= batadv_is_my_client(bat_priv, hw_src, vid);
1178out: 1186out:
1179 if (ret) 1187 if (dropped)
1180 kfree_skb(skb); 1188 kfree_skb(skb);
1181 /* if ret == false -> packet has to be delivered to the interface */ 1189 /* if dropped == false -> deliver to the interface */
1182 return ret; 1190 return dropped;
1183} 1191}
1184 1192
1185/** 1193/**
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index bb0158620628..cffa92dd9877 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -439,6 +439,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
439 439
440 INIT_HLIST_NODE(&gw_node->list); 440 INIT_HLIST_NODE(&gw_node->list);
441 gw_node->orig_node = orig_node; 441 gw_node->orig_node = orig_node;
442 gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
443 gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
442 atomic_set(&gw_node->refcount, 1); 444 atomic_set(&gw_node->refcount, 1);
443 445
444 spin_lock_bh(&bat_priv->gw.list_lock); 446 spin_lock_bh(&bat_priv->gw.list_lock);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index c002961da75d..a2fc843c2243 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -479,6 +479,9 @@ out:
479 */ 479 */
480void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan) 480void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
481{ 481{
482 if (!vlan)
483 return;
484
482 if (atomic_dec_and_test(&vlan->refcount)) { 485 if (atomic_dec_and_test(&vlan->refcount)) {
483 spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock); 486 spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
484 hlist_del_rcu(&vlan->list); 487 hlist_del_rcu(&vlan->list);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b4824951010b..5809b39c1922 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -594,6 +594,12 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
594 594
595 /* increase the refcounter of the related vlan */ 595 /* increase the refcounter of the related vlan */
596 vlan = batadv_softif_vlan_get(bat_priv, vid); 596 vlan = batadv_softif_vlan_get(bat_priv, vid);
597 if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
598 addr, BATADV_PRINT_VID(vid))) {
599 kfree(tt_local);
600 tt_local = NULL;
601 goto out;
602 }
597 603
598 batadv_dbg(BATADV_DBG_TT, bat_priv, 604 batadv_dbg(BATADV_DBG_TT, bat_priv,
599 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", 605 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
@@ -1034,6 +1040,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1034 struct batadv_tt_local_entry *tt_local_entry; 1040 struct batadv_tt_local_entry *tt_local_entry;
1035 uint16_t flags, curr_flags = BATADV_NO_FLAGS; 1041 uint16_t flags, curr_flags = BATADV_NO_FLAGS;
1036 struct batadv_softif_vlan *vlan; 1042 struct batadv_softif_vlan *vlan;
1043 void *tt_entry_exists;
1037 1044
1038 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); 1045 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
1039 if (!tt_local_entry) 1046 if (!tt_local_entry)
@@ -1061,11 +1068,22 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1061 * immediately purge it 1068 * immediately purge it
1062 */ 1069 */
1063 batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL); 1070 batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
1064 hlist_del_rcu(&tt_local_entry->common.hash_entry); 1071
1072 tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
1073 batadv_compare_tt,
1074 batadv_choose_tt,
1075 &tt_local_entry->common);
1076 if (!tt_entry_exists)
1077 goto out;
1078
1079 /* extra call to free the local tt entry */
1065 batadv_tt_local_entry_free_ref(tt_local_entry); 1080 batadv_tt_local_entry_free_ref(tt_local_entry);
1066 1081
1067 /* decrease the reference held for this vlan */ 1082 /* decrease the reference held for this vlan */
1068 vlan = batadv_softif_vlan_get(bat_priv, vid); 1083 vlan = batadv_softif_vlan_get(bat_priv, vid);
1084 if (!vlan)
1085 goto out;
1086
1069 batadv_softif_vlan_free_ref(vlan); 1087 batadv_softif_vlan_free_ref(vlan);
1070 batadv_softif_vlan_free_ref(vlan); 1088 batadv_softif_vlan_free_ref(vlan);
1071 1089
@@ -1166,8 +1184,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
1166 /* decrease the reference held for this vlan */ 1184 /* decrease the reference held for this vlan */
1167 vlan = batadv_softif_vlan_get(bat_priv, 1185 vlan = batadv_softif_vlan_get(bat_priv,
1168 tt_common_entry->vid); 1186 tt_common_entry->vid);
1169 batadv_softif_vlan_free_ref(vlan); 1187 if (vlan) {
1170 batadv_softif_vlan_free_ref(vlan); 1188 batadv_softif_vlan_free_ref(vlan);
1189 batadv_softif_vlan_free_ref(vlan);
1190 }
1171 1191
1172 batadv_tt_local_entry_free_ref(tt_local); 1192 batadv_tt_local_entry_free_ref(tt_local);
1173 } 1193 }
@@ -3207,8 +3227,10 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
3207 3227
3208 /* decrease the reference held for this vlan */ 3228 /* decrease the reference held for this vlan */
3209 vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid); 3229 vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
3210 batadv_softif_vlan_free_ref(vlan); 3230 if (vlan) {
3211 batadv_softif_vlan_free_ref(vlan); 3231 batadv_softif_vlan_free_ref(vlan);
3232 batadv_softif_vlan_free_ref(vlan);
3233 }
3212 3234
3213 batadv_tt_local_entry_free_ref(tt_local); 3235 batadv_tt_local_entry_free_ref(tt_local);
3214 } 3236 }
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7998fb279165..92720f3fe573 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -7820,7 +7820,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7820 /* Make sure we copy only the significant bytes based on the 7820 /* Make sure we copy only the significant bytes based on the
7821 * encryption key size, and set the rest of the value to zeroes. 7821 * encryption key size, and set the rest of the value to zeroes.
7822 */ 7822 */
7823 memcpy(ev.key.val, key->val, sizeof(key->enc_size)); 7823 memcpy(ev.key.val, key->val, key->enc_size);
7824 memset(ev.key.val + key->enc_size, 0, 7824 memset(ev.key.val + key->enc_size, 0,
7825 sizeof(ev.key.val) - key->enc_size); 7825 sizeof(ev.key.val) - key->enc_size);
7826 7826
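
The one-liner above fixes a classic sizeof slip: sizeof(key->enc_size) is the size of the length field itself (1 for a u8), not the length stored in it, so only the first byte of the key was being copied. In isolation (sizes here are illustrative):

#include <stdint.h>
#include <string.h>

struct ltk { uint8_t enc_size; uint8_t val[16]; };

static void copy_key(uint8_t dst[16], const struct ltk *key)
{
	/* buggy: memcpy(dst, key->val, sizeof(key->enc_size)) copies 1 byte */
	memcpy(dst, key->val, key->enc_size);		    /* copy the key */
	memset(dst + key->enc_size, 0, 16 - key->enc_size); /* zero the rest */
}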
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 0b39dcc65b94..1285eaf5dc22 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1591,7 +1591,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1591 break; 1591 break;
1592 } 1592 }
1593 1593
1594 if (skb_trimmed) 1594 if (skb_trimmed && skb_trimmed != skb)
1595 kfree_skb(skb_trimmed); 1595 kfree_skb(skb_trimmed);
1596 1596
1597 return err; 1597 return err;
@@ -1636,7 +1636,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1636 break; 1636 break;
1637 } 1637 }
1638 1638
1639 if (skb_trimmed) 1639 if (skb_trimmed && skb_trimmed != skb)
1640 kfree_skb(skb_trimmed); 1640 kfree_skb(skb_trimmed);
1641 1641
1642 return err; 1642 return err;
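
Both rcv paths now free skb_trimmed only when it differs from skb, matching the new skb_checksum_trimmed() contract further down this merge: the helper no longer consumes the passed-in skb, it returns either the original or a fresh clone. The ownership rule in miniature (buf and maybe_trim are stand-ins):

struct buf;

extern struct buf *maybe_trim(struct buf *b);	/* b itself, a clone, or NULL */
extern void free_buf(struct buf *b);

static void process(struct buf *b)
{
	struct buf *t = maybe_trim(b);

	/* ... inspect t ... */

	if (t && t != b)
		free_buf(t);	/* free the clone, never the caller's buffer */
}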
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 3da5525eb8a2..4d74a0639c4c 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void)
112 + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ 112 + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
113 + nla_total_size(1) /* IFLA_BRPORT_LEARNING */ 113 + nla_total_size(1) /* IFLA_BRPORT_LEARNING */
114 + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */ 114 + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
115 + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
116 + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
115 + 0; 117 + 0;
116} 118}
117 119
@@ -506,6 +508,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
506 [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 }, 508 [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
507 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, 509 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
508 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, 510 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
511 [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
512 [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
509}; 513};
510 514
511/* Change the state of the port and notify spanning tree */ 515/* Change the state of the port and notify spanning tree */
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 4967262b2707..617088aee21d 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -131,12 +131,12 @@ out_noerr:
131 goto out; 131 goto out;
132} 132}
133 133
134static int skb_set_peeked(struct sk_buff *skb) 134static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
135{ 135{
136 struct sk_buff *nskb; 136 struct sk_buff *nskb;
137 137
138 if (skb->peeked) 138 if (skb->peeked)
139 return 0; 139 return skb;
140 140
141 /* We have to unshare an skb before modifying it. */ 141 /* We have to unshare an skb before modifying it. */
142 if (!skb_shared(skb)) 142 if (!skb_shared(skb))
@@ -144,7 +144,7 @@ static int skb_set_peeked(struct sk_buff *skb)
144 144
145 nskb = skb_clone(skb, GFP_ATOMIC); 145 nskb = skb_clone(skb, GFP_ATOMIC);
146 if (!nskb) 146 if (!nskb)
147 return -ENOMEM; 147 return ERR_PTR(-ENOMEM);
148 148
149 skb->prev->next = nskb; 149 skb->prev->next = nskb;
150 skb->next->prev = nskb; 150 skb->next->prev = nskb;
@@ -157,7 +157,7 @@ static int skb_set_peeked(struct sk_buff *skb)
157done: 157done:
158 skb->peeked = 1; 158 skb->peeked = 1;
159 159
160 return 0; 160 return skb;
161} 161}
162 162
163/** 163/**
@@ -229,8 +229,9 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
229 continue; 229 continue;
230 } 230 }
231 231
232 error = skb_set_peeked(skb); 232 skb = skb_set_peeked(skb);
233 if (error) 233 error = PTR_ERR(skb);
234 if (IS_ERR(skb))
234 goto unlock_err; 235 goto unlock_err;
235 236
236 atomic_inc(&skb->users); 237 atomic_inc(&skb->users);
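
skb_set_peeked() now reports failure through the pointer itself, the usual ERR_PTR idiom: small negative errnos live in the top page of the address space, so one return value carries both outcomes and the caller tests it with IS_ERR()/PTR_ERR(). A user-space model of the encoding (MAX_ERRNO and the helpers mirror the kernel macros; clone_buf is a stand-in):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline bool  IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct buf;
extern struct buf *clone_buf(struct buf *b);

static struct buf *set_peeked(struct buf *b)
{
	struct buf *nb = clone_buf(b);

	if (!nb)
		return ERR_PTR(-ENOMEM);  /* the error rides in the pointer */
	return nb;
}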
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1ebdf1c0d118..1cbd209192ea 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3514,8 +3514,6 @@ static int pktgen_thread_worker(void *arg)
3514 3514
3515 set_freezable(); 3515 set_freezable();
3516 3516
3517 __set_current_state(TASK_RUNNING);
3518
3519 while (!kthread_should_stop()) { 3517 while (!kthread_should_stop()) {
3520 pkt_dev = next_to_run(t); 3518 pkt_dev = next_to_run(t);
3521 3519
@@ -3560,7 +3558,6 @@ static int pktgen_thread_worker(void *arg)
3560 3558
3561 try_to_freeze(); 3559 try_to_freeze();
3562 } 3560 }
3563 set_current_state(TASK_INTERRUPTIBLE);
3564 3561
3565 pr_debug("%s stopping all device\n", t->tsk->comm); 3562 pr_debug("%s stopping all device\n", t->tsk->comm);
3566 pktgen_stop(t); 3563 pktgen_stop(t);
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 87b22c0bc08c..b42f0e26f89e 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
103 spin_lock_bh(&queue->syn_wait_lock); 103 spin_lock_bh(&queue->syn_wait_lock);
104 while ((req = lopt->syn_table[i]) != NULL) { 104 while ((req = lopt->syn_table[i]) != NULL) {
105 lopt->syn_table[i] = req->dl_next; 105 lopt->syn_table[i] = req->dl_next;
106 /* Because of following del_timer_sync(),
107 * we must release the spinlock here
109 * or risk a deadlock.
109 */
110 spin_unlock_bh(&queue->syn_wait_lock);
106 atomic_inc(&lopt->qlen_dec); 111 atomic_inc(&lopt->qlen_dec);
107 if (del_timer(&req->rsk_timer)) 112 if (del_timer_sync(&req->rsk_timer))
108 reqsk_put(req); 113 reqsk_put(req);
109 reqsk_put(req); 114 reqsk_put(req);
115 spin_lock_bh(&queue->syn_wait_lock);
110 } 116 }
111 spin_unlock_bh(&queue->syn_wait_lock); 117 spin_unlock_bh(&queue->syn_wait_lock);
112 } 118 }
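The comment added in this hunk names the constraint worth remembering: del_timer_sync() waits for a running timer handler, and the reqsk timer handler takes syn_wait_lock itself, so the walker must drop the lock around the synchronous cancel and retake it afterwards. A userspace analogue of the same shape, using pthreads and a join in place of the synchronous timer cancellation (illustrative only; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for the timer handler: it takes table_lock itself. */
static void *handler(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&table_lock);
	puts("handler ran under table_lock");
	pthread_mutex_unlock(&table_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, handler, NULL);

	pthread_mutex_lock(&table_lock);
	/* ... walk the table ... */

	/* Joining while still holding table_lock could deadlock: we would
	 * hold the lock the handler needs while waiting for the handler
	 * to finish. Drop the lock around the synchronous wait, then
	 * retake it, exactly as the hunk does around del_timer_sync(). */
	pthread_mutex_unlock(&table_lock);
	pthread_join(t, NULL);
	pthread_mutex_lock(&table_lock);

	/* ... continue the walk ... */
	pthread_mutex_unlock(&table_lock);
	return 0;
}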
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b6a19ca0f99e..7b84330e5d30 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
 
 	if (skb && frag_size) {
 		skb->head_frag = 1;
-		if (virt_to_head_page(data)->pfmemalloc)
+		if (page_is_pfmemalloc(virt_to_head_page(data)))
 			skb->pfmemalloc = 1;
 	}
 	return skb;
@@ -4022,8 +4022,8 @@ EXPORT_SYMBOL(skb_checksum_setup);
  * Otherwise returns the provided skb. Returns NULL in error cases
  * (e.g. transport_len exceeds skb length or out-of-memory).
  *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
  */
 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
 					       unsigned int transport_len)
@@ -4032,16 +4032,12 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
 	unsigned int len = skb_transport_offset(skb) + transport_len;
 	int ret;
 
-	if (skb->len < len) {
-		kfree_skb(skb);
+	if (skb->len < len)
 		return NULL;
-	} else if (skb->len == len) {
+	else if (skb->len == len)
 		return skb;
-	}
 
 	skb_chk = skb_clone(skb, GFP_ATOMIC);
-	kfree_skb(skb);
-
 	if (!skb_chk)
 		return NULL;
 
@@ -4066,8 +4062,8 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
  * If the skb has data beyond the given transport length, then a
  * trimmed & cloned skb is checked and returned.
  *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
  */
 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
 				     unsigned int transport_len,
@@ -4079,23 +4075,26 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
 
 	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
 	if (!skb_chk)
-		return NULL;
+		goto err;
 
-	if (!pskb_may_pull(skb_chk, offset)) {
-		kfree_skb(skb_chk);
-		return NULL;
-	}
+	if (!pskb_may_pull(skb_chk, offset))
+		goto err;
 
 	__skb_pull(skb_chk, offset);
 	ret = skb_chkf(skb_chk);
 	__skb_push(skb_chk, offset);
 
-	if (ret) {
-		kfree_skb(skb_chk);
-		return NULL;
-	}
+	if (ret)
+		goto err;
 
 	return skb_chk;
+
+err:
+	if (skb_chk && skb_chk != skb)
+		kfree_skb(skb_chk);
+
+	return NULL;
+
 }
 EXPORT_SYMBOL(skb_checksum_trimmed);
 
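After the rewrite above, every failure path in skb_checksum_trimmed() funnels through a single err label that frees the clone only when it differs from the caller's skb, matching the relaxed ownership rule in the updated kernel-doc. The same single-exit cleanup shape in a self-contained sketch (checked_copy() is an invented example, not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a validated copy of buf, or NULL on failure. On failure the
 * helper releases only what it allocated itself, never the caller's
 * buffer, mirroring the skb_chk != skb test above. */
static char *checked_copy(const char *buf)
{
	char *copy = strdup(buf);

	if (!copy)
		goto err;

	if (strlen(copy) == 0)	/* stand-in for the validation step */
		goto err;

	return copy;

err:
	free(copy);		/* free(NULL) is a no-op */
	return NULL;
}

int main(void)
{
	char *c = checked_copy("payload");

	if (!c)
		return 1;
	printf("validated: %s\n", c);
	free(c);
	return 0;
}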
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 0917123790ea..35c47ddd04f0 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -756,7 +756,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
 		return -ENODEV;
 
 	/* Use already configured phy mode */
-	p->phy_interface = p->phy->interface;
+	if (p->phy_interface == PHY_INTERFACE_MODE_NA)
+		p->phy_interface = p->phy->interface;
 	phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
 			   p->phy_interface);
 
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 37c4bb89a708..b0c6258ffb79 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2465,7 +2465,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
 		key = l->key + 1;
 		iter->pos++;
 
-		if (pos-- <= 0)
+		if (--pos <= 0)
 			break;
 
 		l = NULL;
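The fib_trie.c change is a one-character off-by-one fix: pos-- <= 0 tests the old value of pos, so the iterator visits one route too many before stopping, while --pos <= 0 tests the already-decremented value. A compilable demonstration of the difference:

#include <stdio.h>

/* Count how many entries the loop visits before the stop test fires. */
static int steps(int pos, int predecrement)
{
	int n = 0;

	for (;;) {
		n++;	/* visit one entry */
		if (predecrement ? (--pos <= 0) : (pos-- <= 0))
			break;
	}
	return n;
}

int main(void)
{
	printf("pos-- visits %d entries for pos=3\n", steps(3, 0)); /* 4 */
	printf("--pos visits %d entries for pos=3\n", steps(3, 1)); /* 3 */
	return 0;
}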
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 651cdf648ec4..9fdfd9deac11 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1435,33 +1435,35 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 	struct sk_buff *skb_chk;
 	unsigned int transport_len;
 	unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
-	int ret;
+	int ret = -EINVAL;
 
 	transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
 
-	skb_get(skb);
 	skb_chk = skb_checksum_trimmed(skb, transport_len,
 				       ip_mc_validate_checksum);
 	if (!skb_chk)
-		return -EINVAL;
+		goto err;
 
-	if (!pskb_may_pull(skb_chk, len)) {
-		kfree_skb(skb_chk);
-		return -EINVAL;
-	}
+	if (!pskb_may_pull(skb_chk, len))
+		goto err;
 
 	ret = ip_mc_check_igmp_msg(skb_chk);
-	if (ret) {
-		kfree_skb(skb_chk);
-		return ret;
-	}
+	if (ret)
+		goto err;
 
 	if (skb_trimmed)
 		*skb_trimmed = skb_chk;
-	else
+	/* free now unneeded clone */
+	else if (skb_chk != skb)
 		kfree_skb(skb_chk);
 
-	return 0;
+	ret = 0;
+
+err:
+	if (ret && skb_chk && skb_chk != skb)
+		kfree_skb(skb_chk);
+
+	return ret;
 }
 
 /**
@@ -1470,7 +1472,7 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
  * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
  *
  * Checks whether an IPv4 packet is a valid IGMP packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
  *
  * -EINVAL: A broken packet was detected, i.e. it violates some internet
  * standard
@@ -1485,7 +1487,8 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
  * to leave the original skb and its full frame unchanged (which might be
  * desirable for layer 2 frame jugglers).
  *
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
  */
 int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 60021d0d9326..134957159c27 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
 	}
 
 	spin_unlock(&queue->syn_wait_lock);
-	if (del_timer(&req->rsk_timer))
+	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
 		reqsk_put(req);
 	return found;
 }
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index fe8cc183411e..95ea633e8356 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -226,7 +226,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+			  niph, nth, tcp_hdr_size);
 }
 
 static bool
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 433231ccfb17..0330ab2e2b63 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -41,8 +41,6 @@ static int tcp_syn_retries_min = 1;
 static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
-static int min_sndbuf = SOCK_MIN_SNDBUF;
-static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
@@ -530,7 +528,7 @@ static struct ctl_table ipv4_table[] = {
 		.maxlen		= sizeof(sysctl_tcp_wmem),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &min_sndbuf,
+		.extra1		= &one,
 	},
 	{
 		.procname	= "tcp_notsent_lowat",
@@ -545,7 +543,7 @@ static struct ctl_table ipv4_table[] = {
 		.maxlen		= sizeof(sysctl_tcp_rmem),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &min_rcvbuf,
+		.extra1		= &one,
 	},
 	{
 		.procname	= "tcp_app_win",
@@ -758,7 +756,7 @@ static struct ctl_table ipv4_table[] = {
 		.maxlen		= sizeof(sysctl_udp_rmem_min),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &min_rcvbuf,
+		.extra1		= &one
 	},
 	{
 		.procname	= "udp_wmem_min",
@@ -766,7 +764,7 @@ static struct ctl_table ipv4_table[] = {
 		.maxlen		= sizeof(sysctl_udp_wmem_min),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &min_sndbuf,
+		.extra1		= &one
 	},
 	{ }
 };
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d7d4c2b79cf2..0ea2e1c5d395 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 	req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
 	if (req) {
 		nsk = tcp_check_req(sk, skb, req, false);
-		if (!nsk)
+		if (!nsk || nsk == sk)
 			reqsk_put(req);
 		return nsk;
 	}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 83aa604f9273..1b8c5ba7d5f7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb)
 
 	skb->sk = sk;
 	skb->destructor = sock_efree;
-	dst = sk->sk_rx_dst;
+	dst = READ_ONCE(sk->sk_rx_dst);
 
 	if (dst)
 		dst = dst_check(dst, 0);
-	if (dst)
-		skb_dst_set_noref(skb, dst);
+	if (dst) {
+		/* DST_NOCACHE can not be used without taking a reference */
+		if (dst->flags & DST_NOCACHE) {
+			if (likely(atomic_inc_not_zero(&dst->__refcnt)))
+				skb_dst_set(skb, dst);
+		} else {
+			skb_dst_set_noref(skb, dst);
+		}
+	}
 }
 
 int udp_rcv(struct sk_buff *skb)
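The udp_v4_early_demux() hunk pairs READ_ONCE() on the lockless sk_rx_dst read with a conditional reference: a DST_NOCACHE entry may be freed once its refcount drops to zero, so it is only attached after atomic_inc_not_zero() succeeds, which never resurrects a dying object. A C11 userspace sketch of that primitive (illustrative; the kernel's version operates on atomic_t):

#include <stdatomic.h>
#include <stdio.h>

/* Take a reference only while the object is still live (count > 0).
 * A plain increment could bump 0 back to 1 and hand out a pointer to
 * memory that is concurrently being freed. */
static int ref_get_not_zero(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	while (old != 0) {
		/* On failure, old is reloaded with the current value. */
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return 1;	/* reference taken */
	}
	return 0;			/* object is dying; leave it alone */
}

int main(void)
{
	atomic_int live = 2, dying = 0;

	printf("live:  %s\n", ref_get_not_zero(&live) ? "taken" : "refused");
	printf("dying: %s\n", ref_get_not_zero(&dying) ? "taken" : "refused");
	return 0;
}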
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 55d19861ab20..548c6237b1e7 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -172,6 +172,8 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 			*ppcpu_rt = NULL;
 		}
 	}
+
+	non_pcpu_rt->rt6i_pcpu = NULL;
 }
 
 static void rt6_release(struct rt6_info *rt)
diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
index df8afe5ab31e..9405b04eecc6 100644
--- a/net/ipv6/mcast_snoop.c
+++ b/net/ipv6/mcast_snoop.c
@@ -143,34 +143,36 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
 	struct sk_buff *skb_chk = NULL;
 	unsigned int transport_len;
 	unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
-	int ret;
+	int ret = -EINVAL;
 
 	transport_len = ntohs(ipv6_hdr(skb)->payload_len);
 	transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
 
-	skb_get(skb);
 	skb_chk = skb_checksum_trimmed(skb, transport_len,
 				       ipv6_mc_validate_checksum);
 	if (!skb_chk)
-		return -EINVAL;
+		goto err;
 
-	if (!pskb_may_pull(skb_chk, len)) {
-		kfree_skb(skb_chk);
-		return -EINVAL;
-	}
+	if (!pskb_may_pull(skb_chk, len))
+		goto err;
 
 	ret = ipv6_mc_check_mld_msg(skb_chk);
-	if (ret) {
-		kfree_skb(skb_chk);
-		return ret;
-	}
+	if (ret)
+		goto err;
 
 	if (skb_trimmed)
 		*skb_trimmed = skb_chk;
-	else
+	/* free now unneeded clone */
+	else if (skb_chk != skb)
 		kfree_skb(skb_chk);
 
-	return 0;
+	ret = 0;
+
+err:
+	if (ret && skb_chk && skb_chk != skb)
+		kfree_skb(skb_chk);
+
+	return ret;
 }
 
 /**
@@ -179,7 +181,7 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
  * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
  *
  * Checks whether an IPv6 packet is a valid MLD packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
  *
  * -EINVAL: A broken packet was detected, i.e. it violates some internet
  * standard
@@ -194,7 +196,8 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
  * to leave the original skb and its full frame unchanged (which might be
  * desirable for layer 2 frame jugglers).
  *
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
  */
 int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 {
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 6edb7b106de7..ebbb754c2111 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
 }
 
 static void
-synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
+synproxy_send_tcp(const struct synproxy_net *snet,
+		  const struct sk_buff *skb, struct sk_buff *nskb,
 		  struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
 		  struct ipv6hdr *niph, struct tcphdr *nth,
 		  unsigned int tcp_hdr_size)
 {
-	struct net *net = nf_ct_net((struct nf_conn *)nfct);
+	struct net *net = nf_ct_net(snet->tmpl);
 	struct dst_entry *dst;
 	struct flowi6 fl6;
 
@@ -83,7 +84,8 @@ free_nskb:
 }
 
 static void
-synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
+synproxy_send_client_synack(const struct synproxy_net *snet,
+			    const struct sk_buff *skb, const struct tcphdr *th,
 			    const struct synproxy_options *opts)
 {
 	struct sk_buff *nskb;
@@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
 			  niph, nth, tcp_hdr_size);
 }
 
@@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+	synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
 			  niph, nth, tcp_hdr_size);
 }
 
@@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
 }
 
 static void
@@ -241,7 +243,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+			  niph, nth, tcp_hdr_size);
 }
 
 static bool
@@ -301,7 +304,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 					  XT_SYNPROXY_OPT_SACK_PERM |
 					  XT_SYNPROXY_OPT_ECN);
 
-		synproxy_send_client_synack(skb, th, &opts);
+		synproxy_send_client_synack(snet, skb, th, &opts);
 		return NF_DROP;
 
 	} else if (th->ack && !(th->fin || th->rst || th->syn)) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 6090969937f8..d15586490cec 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -318,8 +318,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
 /* allocate dst with ip6_dst_ops */
 static struct rt6_info *__ip6_dst_alloc(struct net *net,
 					struct net_device *dev,
-					int flags,
-					struct fib6_table *table)
+					int flags)
 {
 	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
 					0, DST_OBSOLETE_FORCE_CHK, flags);
@@ -336,10 +335,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
 
 static struct rt6_info *ip6_dst_alloc(struct net *net,
 				      struct net_device *dev,
-				      int flags,
-				      struct fib6_table *table)
+				      int flags)
 {
-	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table);
+	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
 
 	if (rt) {
 		rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
@@ -950,8 +948,7 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
 	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
 		ort = (struct rt6_info *)ort->dst.from;
 
-	rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev,
-			     0, ort->rt6i_table);
+	rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
 
 	if (!rt)
 		return NULL;
@@ -983,8 +980,7 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
 	struct rt6_info *pcpu_rt;
 
 	pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
-				  rt->dst.dev, rt->dst.flags,
-				  rt->rt6i_table);
+				  rt->dst.dev, rt->dst.flags);
 
 	if (!pcpu_rt)
 		return NULL;
@@ -997,32 +993,53 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
 /* It should be called with read_lock_bh(&tb6_lock) acquired */
 static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
 {
-	struct rt6_info *pcpu_rt, *prev, **p;
+	struct rt6_info *pcpu_rt, **p;
 
 	p = this_cpu_ptr(rt->rt6i_pcpu);
 	pcpu_rt = *p;
 
-	if (pcpu_rt)
-		goto done;
+	if (pcpu_rt) {
+		dst_hold(&pcpu_rt->dst);
+		rt6_dst_from_metrics_check(pcpu_rt);
+	}
+	return pcpu_rt;
+}
+
+static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
+{
+	struct fib6_table *table = rt->rt6i_table;
+	struct rt6_info *pcpu_rt, *prev, **p;
 
 	pcpu_rt = ip6_rt_pcpu_alloc(rt);
 	if (!pcpu_rt) {
 		struct net *net = dev_net(rt->dst.dev);
 
-		pcpu_rt = net->ipv6.ip6_null_entry;
-		goto done;
+		dst_hold(&net->ipv6.ip6_null_entry->dst);
+		return net->ipv6.ip6_null_entry;
 	}
 
-	prev = cmpxchg(p, NULL, pcpu_rt);
-	if (prev) {
-		/* If someone did it before us, return prev instead */
+	read_lock_bh(&table->tb6_lock);
+	if (rt->rt6i_pcpu) {
+		p = this_cpu_ptr(rt->rt6i_pcpu);
+		prev = cmpxchg(p, NULL, pcpu_rt);
+		if (prev) {
+			/* If someone did it before us, return prev instead */
+			dst_destroy(&pcpu_rt->dst);
+			pcpu_rt = prev;
+		}
+	} else {
+		/* rt has been removed from the fib6 tree
+		 * before we have a chance to acquire the read_lock.
+		 * In this case, don't brother to create a pcpu rt
+		 * since rt is going away anyway. The next
+		 * dst_check() will trigger a re-lookup.
+		 */
 		dst_destroy(&pcpu_rt->dst);
-		pcpu_rt = prev;
+		pcpu_rt = rt;
 	}
-
-done:
 	dst_hold(&pcpu_rt->dst);
 	rt6_dst_from_metrics_check(pcpu_rt);
+	read_unlock_bh(&table->tb6_lock);
 	return pcpu_rt;
 }
 
@@ -1097,9 +1114,22 @@ redo_rt6_select:
 		rt->dst.lastuse = jiffies;
 		rt->dst.__use++;
 		pcpu_rt = rt6_get_pcpu_route(rt);
-		read_unlock_bh(&table->tb6_lock);
+
+		if (pcpu_rt) {
+			read_unlock_bh(&table->tb6_lock);
+		} else {
+			/* We have to do the read_unlock first
+			 * because rt6_make_pcpu_route() may trigger
+			 * ip6_dst_gc() which will take the write_lock.
+			 */
+			dst_hold(&rt->dst);
+			read_unlock_bh(&table->tb6_lock);
+			pcpu_rt = rt6_make_pcpu_route(rt);
+			dst_release(&rt->dst);
+		}
 
 		return pcpu_rt;
+
 	}
 }
 
@@ -1555,7 +1585,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	if (unlikely(!idev))
 		return ERR_PTR(-ENODEV);
 
-	rt = ip6_dst_alloc(net, dev, 0, NULL);
+	rt = ip6_dst_alloc(net, dev, 0);
 	if (unlikely(!rt)) {
 		in6_dev_put(idev);
 		dst = ERR_PTR(-ENOMEM);
@@ -1742,7 +1772,8 @@ int ip6_route_add(struct fib6_config *cfg)
 	if (!table)
 		goto out;
 
-	rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
+	rt = ip6_dst_alloc(net, NULL,
+			   (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
 
 	if (!rt) {
 		err = -ENOMEM;
@@ -1831,6 +1862,7 @@ int ip6_route_add(struct fib6_config *cfg)
 		int gwa_type;
 
 		gw_addr = &cfg->fc_gateway;
+		gwa_type = ipv6_addr_type(gw_addr);
 
 		/* if gw_addr is local we will fail to detect this in case
 		 * address is still TENTATIVE (DAD in progress). rt6_lookup()
@@ -1838,11 +1870,12 @@ int ip6_route_add(struct fib6_config *cfg)
 		 * prefix route was assigned to, which might be non-loopback.
 		 */
 		err = -EINVAL;
-		if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0))
+		if (ipv6_chk_addr_and_flags(net, gw_addr,
+					    gwa_type & IPV6_ADDR_LINKLOCAL ?
+					    dev : NULL, 0, 0))
 			goto out;
 
 		rt->rt6i_gateway = *gw_addr;
-		gwa_type = ipv6_addr_type(gw_addr);
 
 		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
 			struct rt6_info *grt;
@@ -2397,7 +2430,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 {
 	struct net *net = dev_net(idev->dev);
 	struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
-					    DST_NOCOUNT, NULL);
+					    DST_NOCOUNT);
 	if (!rt)
 		return ERR_PTR(-ENOMEM);
 
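The route.c rework splits the per-cpu cache into a lock-free read side (rt6_get_pcpu_route) and a create side (rt6_make_pcpu_route) that claims the empty slot with cmpxchg() and destroys its own copy when another CPU wins the race. A C11 sketch of that claim-or-discard pattern (illustrative; a single global slot stands in for the per-cpu pointer, and none of the locking or GC interplay is modelled):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(int *) slot;		/* stands in for *this_cpu_ptr(...) */

static int *get_or_make(void)
{
	int *expected = NULL;
	int *cached = atomic_load(&slot);
	int *mine;

	if (cached)
		return cached;		/* fast path: already cached */

	mine = malloc(sizeof(*mine));
	if (!mine)
		return NULL;
	*mine = 42;

	/* Claim the empty slot; if someone beat us to it, discard our
	 * copy and use the winner's object instead. */
	if (atomic_compare_exchange_strong(&slot, &expected, mine))
		return mine;
	free(mine);
	return expected;
}

int main(void)
{
	int *a = get_or_make();
	int *b = get_or_make();

	if (!a || !b)
		return 1;
	printf("cached value: %d, same object: %s\n", *a,
	       a == b ? "yes" : "no");
	free(atomic_load(&slot));
	return 0;
}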
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6748c4277aff..7a6cea5e4274 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -943,7 +943,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
 				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
 	if (req) {
 		nsk = tcp_check_req(sk, skb, req, false);
-		if (!nsk)
+		if (!nsk || nsk == sk)
 			reqsk_put(req);
 		return nsk;
 	}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 247552a7f6c2..3ece7d1034c8 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -92,14 +92,15 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
 static inline void
 minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
 {
-	int j = MAX_THR_RATES;
-	struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats;
+	int j;
+	struct minstrel_rate_stats *tmp_mrs;
 	struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
 
-	while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) >
-	       minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) {
-		j--;
+	for (j = MAX_THR_RATES; j > 0; --j) {
 		tmp_mrs = &mi->r[tp_list[j - 1]].stats;
+		if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
+		    minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
+			break;
 	}
 
 	if (j < MAX_THR_RATES - 1)
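The minstrel rewrite replaces a while loop whose comparison target tmp_mrs was computed before the first iteration (and therefore lagged one slot behind) with a for loop that reloads tmp_mrs at the top of every pass. Stripped of the rate-stats plumbing, the scan is an insertion-position search over a descending top list, as this standalone example shows:

#include <stdio.h>

#define MAX_THR_RATES 4

/* Scan from the bottom of a descending list and stop at the first
 * entry that is not smaller than value; the comparison target is
 * re-read on every iteration, never carried over stale. */
static int insert_pos(const int *tp_list, int value)
{
	int j;

	for (j = MAX_THR_RATES; j > 0; --j) {
		if (value <= tp_list[j - 1])
			break;
	}
	return j;	/* value belongs just below index j */
}

int main(void)
{
	int top[MAX_THR_RATES] = { 90, 70, 50, 30 };

	printf("60 inserts at %d\n", insert_pos(top, 60));	/* 2 */
	printf("10 inserts at %d\n", insert_pos(top, 10));	/* 4 */
	return 0;
}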
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 651039ad1681..3c20d02aee73 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -292,7 +292,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
 {
 	struct nf_conn *tmpl;
 
-	tmpl = kzalloc(sizeof(struct nf_conn), GFP_KERNEL);
+	tmpl = kzalloc(sizeof(*tmpl), flags);
 	if (tmpl == NULL)
 		return NULL;
 
@@ -303,7 +303,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
 	if (zone) {
 		struct nf_conntrack_zone *nf_ct_zone;
 
-		nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, GFP_ATOMIC);
+		nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
 		if (!nf_ct_zone)
 			goto out_free;
 		nf_ct_zone->id = zone;
@@ -1544,10 +1544,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
 	sz = nr_slots * sizeof(struct hlist_nulls_head);
 	hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
 					get_order(sz));
-	if (!hash) {
-		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
+	if (!hash)
 		hash = vzalloc(sz);
-	}
 
 	if (hash && nulls)
 		for (i = 0; i < nr_slots; i++)
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 71f1e9fdfa18..d7f168527903 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -353,10 +353,8 @@ static int __net_init synproxy_net_init(struct net *net)
 	int err = -ENOMEM;
 
 	ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
-	if (IS_ERR(ct)) {
-		err = PTR_ERR(ct);
+	if (!ct)
 		goto err1;
-	}
 
 	if (!nfct_seqadj_ext_add(ct))
 		goto err2;
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index c6630030c912..43ddeee404e9 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -202,9 +202,10 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
 		goto err1;
 
 	ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
-	ret = PTR_ERR(ct);
-	if (IS_ERR(ct))
+	if (!ct) {
+		ret = -ENOMEM;
 		goto err2;
+	}
 
 	ret = 0;
 	if ((info->ct_events || info->exp_events) &&
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d8e2e3918ce2..67d210477863 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1096,6 +1096,11 @@ static int netlink_insert(struct sock *sk, u32 portid)
 
 	err = __netlink_insert(table, sk);
 	if (err) {
+		/* In case the hashtable backend returns with -EBUSY
+		 * from here, it must not escape to the caller.
+		 */
+		if (unlikely(err == -EBUSY))
+			err = -EOVERFLOW;
 		if (err == -EEXIST)
 			err = -EADDRINUSE;
 		nlk_sk(sk)->portid = 0;
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 8a8c0b8b4f63..ee34f474ad14 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -273,28 +273,36 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
 	return 0;
 }
 
-static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
-			__be32 *addr, __be32 new_addr)
+static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
+				  __be32 addr, __be32 new_addr)
 {
 	int transport_len = skb->len - skb_transport_offset(skb);
 
+	if (nh->frag_off & htons(IP_OFFSET))
+		return;
+
 	if (nh->protocol == IPPROTO_TCP) {
 		if (likely(transport_len >= sizeof(struct tcphdr)))
 			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
-						 *addr, new_addr, 1);
+						 addr, new_addr, 1);
 	} else if (nh->protocol == IPPROTO_UDP) {
 		if (likely(transport_len >= sizeof(struct udphdr))) {
 			struct udphdr *uh = udp_hdr(skb);
 
 			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 				inet_proto_csum_replace4(&uh->check, skb,
-							 *addr, new_addr, 1);
+							 addr, new_addr, 1);
 				if (!uh->check)
 					uh->check = CSUM_MANGLED_0;
 			}
 		}
 	}
+}
 
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+			__be32 *addr, __be32 new_addr)
+{
+	update_ip_l4_checksum(skb, nh, *addr, new_addr);
 	csum_replace4(&nh->check, *addr, new_addr);
 	skb_clear_hash(skb);
 	*addr = new_addr;
diff --git a/net/rds/info.c b/net/rds/info.c
index 9a6b4f66187c..140a44a5f7b7 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
 
 	/* check for all kinds of wrapping and the like */
 	start = (unsigned long)optval;
-	if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
+	if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
 		ret = -EINVAL;
 		goto out;
 	}
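The rds_info_getsockopt() fix rewrites the length check so it no longer relies on the overflow it is trying to detect: len + PAGE_SIZE - 1 < len only becomes true after the signed addition has already overflowed, which is undefined behaviour, while len > INT_MAX - PAGE_SIZE + 1 rejects the same inputs without ever overflowing. A compilable illustration (PAGE_SIZE assumed to be 4096 here):

#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE 4096L

/* Accept len only if rounding it up to a whole page cannot exceed
 * INT_MAX; the bound is rearranged so no addition can overflow. */
static int len_ok(int len)
{
	return len >= 0 && len <= INT_MAX - PAGE_SIZE + 1;
}

int main(void)
{
	printf("4096    -> %s\n", len_ok(4096) ? "ok" : "rejected");
	printf("INT_MAX -> %s\n", len_ok(INT_MAX) ? "ok" : "rejected");
	return 0;
}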
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index a42a3b257226..268545050ddb 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -98,6 +98,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 			return ret;
 		ret = ACT_P_CREATED;
 	} else {
+		if (bind)
+			return 0;
 		if (!ovr) {
 			tcf_hash_release(a, bind);
 			return -EEXIST;
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 21ca33c9f036..a9ba030435a2 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -288,10 +288,26 @@ begin:
 
 static void fq_codel_reset(struct Qdisc *sch)
 {
-	struct sk_buff *skb;
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	int i;
 
-	while ((skb = fq_codel_dequeue(sch)) != NULL)
-		kfree_skb(skb);
+	INIT_LIST_HEAD(&q->new_flows);
+	INIT_LIST_HEAD(&q->old_flows);
+	for (i = 0; i < q->flows_cnt; i++) {
+		struct fq_codel_flow *flow = q->flows + i;
+
+		while (flow->head) {
+			struct sk_buff *skb = dequeue_head(flow);
+
+			qdisc_qstats_backlog_dec(sch, skb);
+			kfree_skb(skb);
+		}
+
+		INIT_LIST_HEAD(&flow->flowchain);
+		codel_vars_init(&flow->cvars);
+	}
+	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
+	sch->q.qlen = 0;
 }
 
 static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 9cb8522d8d22..f3d3fb42b873 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
 my $kconfig = $ARGV[1];
 my $lsmod_file = $ENV{'LSMOD'};
 
-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
+my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
 chomp @makefiles;
 
 my %depends;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0b9847affbec..374ea53288ca 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5190,6 +5190,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+	SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5291,6 +5292,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
+	SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 2ae9619443d1..1d651b8a8957 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -30,6 +30,9 @@ config SND_SOC_GENERIC_DMAENGINE_PCM
 	bool
 	select SND_DMAENGINE_PCM
 
+config SND_SOC_TOPOLOGY
+	bool
+
 # All the supported SoCs
 source "sound/soc/adi/Kconfig"
 source "sound/soc/atmel/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index e189903fabf4..669648b41d30 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -1,6 +1,9 @@
 snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
 snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o
+
+ifneq ($(CONFIG_SND_SOC_TOPOLOGY),)
 snd-soc-core-objs += soc-topology.o
+endif
 
 ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
 snd-soc-core-objs += soc-generic-dmaengine-pcm.o
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 1fab9778807a..0450593980fd 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -638,7 +638,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
 	int err = -ENODEV;
 
 	down_read(&chip->shutdown_rwsem);
-	if (chip->probing && chip->in_pm)
+	if (chip->probing || chip->in_pm)
 		err = 0;
 	else if (!chip->shutdown)
 		err = usb_autopm_get_interface(chip->pm_intf);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index de165a1b9240..20b56eb987f8 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -521,6 +521,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 		goto out_child;
 	}
 
+	/*
+	 * Normally perf_session__new would do this, but it doesn't have the
+	 * evlist.
+	 */
+	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
+		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
+		rec->tool.ordered_events = false;
+	}
+
 	if (!rec->evlist->nr_groups)
 		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
 
@@ -965,9 +974,11 @@ static struct record record = {
 	.tool = {
 		.sample		= process_sample_event,
 		.fork		= perf_event__process_fork,
+		.exit		= perf_event__process_exit,
 		.comm		= perf_event__process_comm,
 		.mmap		= perf_event__process_mmap,
 		.mmap2		= perf_event__process_mmap2,
+		.ordered_events	= true,
 	},
 };
 
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index ecf319728f25..6135cc07213c 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -601,8 +601,8 @@ static void display_sig(int sig __maybe_unused)
 
 static void display_setup_sig(void)
 {
-	signal(SIGSEGV, display_sig);
-	signal(SIGFPE, display_sig);
+	signal(SIGSEGV, sighandler_dump_stack);
+	signal(SIGFPE, sighandler_dump_stack);
 	signal(SIGINT, display_sig);
 	signal(SIGQUIT, display_sig);
 	signal(SIGTERM, display_sig);
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 094ddaee104c..d31fac19c30b 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -638,7 +638,7 @@ ifndef DESTDIR
 prefix ?= $(HOME)
 endif
 bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
+bindir = $(abspath $(prefix)/$(bindir_relative))
 mandir = share/man
 infodir = share/info
 perfexecdir = libexec/perf-core
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 7ff682770fdb..f1a4c833121e 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1387,6 +1387,24 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
 							event->fork.ptid);
 	int err = 0;
 
+	if (dump_trace)
+		perf_event__fprintf_task(event, stdout);
+
+	/*
+	 * There may be an existing thread that is not actually the parent,
+	 * either because we are processing events out of order, or because the
+	 * (fork) event that would have removed the thread was lost. Assume the
+	 * latter case and continue on as best we can.
+	 */
+	if (parent->pid_ != (pid_t)event->fork.ppid) {
+		dump_printf("removing erroneous parent thread %d/%d\n",
+			    parent->pid_, parent->tid);
+		machine__remove_thread(machine, parent);
+		thread__put(parent);
+		parent = machine__findnew_thread(machine, event->fork.ppid,
+						 event->fork.ptid);
+	}
+
 	/* if a thread currently exists for the thread id remove it */
 	if (thread != NULL) {
 		machine__remove_thread(machine, thread);
@@ -1395,8 +1413,6 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
 
 	thread = machine__findnew_thread(machine, event->fork.pid,
 					 event->fork.tid);
-	if (dump_trace)
-		perf_event__fprintf_task(event, stdout);
 
 	if (thread == NULL || parent == NULL ||
 	    thread__fork(thread, parent, sample->time) < 0) {
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 53e8bb7bc852..2a5d8d7698ae 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -85,7 +85,7 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
 	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
 		update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
 	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
-		update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
+		update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
 	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
 		update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
 	else if (perf_stat_evsel__is(counter, ELISION_START))
@@ -398,20 +398,18 @@ void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
398 " # %5.2f%% aborted cycles ", 398 " # %5.2f%% aborted cycles ",
399 100.0 * ((total2-avg) / total)); 399 100.0 * ((total2-avg) / total));
400 } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) && 400 } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
401 avg > 0 &&
402 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) { 401 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
403 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]); 402 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
404 403
405 if (total) 404 if (avg)
406 ratio = total / avg; 405 ratio = total / avg;
407 406
408 fprintf(out, " # %8.0f cycles / transaction ", ratio); 407 fprintf(out, " # %8.0f cycles / transaction ", ratio);
409 } else if (perf_stat_evsel__is(evsel, ELISION_START) && 408 } else if (perf_stat_evsel__is(evsel, ELISION_START) &&
410 avg > 0 &&
411 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) { 409 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
412 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]); 410 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
413 411
414 if (total) 412 if (avg)
415 ratio = total / avg; 413 ratio = total / avg;
416 414
417 fprintf(out, " # %8.0f cycles / elision ", ratio); 415 fprintf(out, " # %8.0f cycles / elision ", ratio);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 28c4b746baa1..0a9ae8014729 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -191,6 +191,12 @@ static int thread__clone_map_groups(struct thread *thread,
 	if (thread->pid_ == parent->pid_)
 		return 0;
 
+	if (thread->mg == parent->mg) {
+		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
+			 thread->pid_, thread->tid, parent->pid_, parent->tid);
+		return 0;
+	}
+
 	/* But this one is new process, copy maps. */
 	for (i = 0; i < MAP__NR_TYPES; ++i)
 		if (map_groups__clone(thread->mg, parent->mg, i) < 0)